1
2/*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/firmware.h>
26#include <linux/etherdevice.h>
27#include <linux/vmalloc.h>
28#include <linux/wl12xx.h>
29#include <linux/interrupt.h>
30
31#include "wlcore.h"
32#include "debug.h"
33#include "wl12xx_80211.h"
34#include "io.h"
35#include "tx.h"
36#include "ps.h"
37#include "init.h"
38#include "debugfs.h"
39#include "testmode.h"
40#include "vendor_cmd.h"
41#include "scan.h"
42#include "hw_ops.h"
43#include "sysfs.h"
44
45#define WL1271_BOOT_RETRIES 3
46
47static char *fwlog_param;
48static int fwlog_mem_blocks = -1;
49static int bug_on_recovery = -1;
50static int no_recovery     = -1;
51
52static void __wl1271_op_remove_interface(struct wl1271 *wl,
53					 struct ieee80211_vif *vif,
54					 bool reset_tx_queues);
55static void wlcore_op_stop_locked(struct wl1271 *wl);
56static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
57
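/*
 * Notify the FW that the associated STA peer has been authorized, so data
 * traffic can start flowing on the station link.
 */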
58static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
59{
60	int ret;
61
62	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
63		return -EINVAL;
64
65	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
66		return 0;
67
68	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
69		return 0;
70
71	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
72	if (ret < 0)
73		return ret;
74
75	wl1271_info("Association completed.");
76	return 0;
77}
78
79static void wl1271_reg_notify(struct wiphy *wiphy,
80			      struct regulatory_request *request)
81{
82	struct ieee80211_supported_band *band;
83	struct ieee80211_channel *ch;
84	int i;
85	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86	struct wl1271 *wl = hw->priv;
87
88	band = wiphy->bands[IEEE80211_BAND_5GHZ];
89	for (i = 0; i < band->n_channels; i++) {
90		ch = &band->channels[i];
91		if (ch->flags & IEEE80211_CHAN_DISABLED)
92			continue;
93
94		if (ch->flags & IEEE80211_CHAN_RADAR)
95			ch->flags |= IEEE80211_CHAN_NO_IR;
96
97	}
98
99	wlcore_regdomain_config(wl);
100}
101
102static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
103				   bool enable)
104{
105	int ret = 0;
106
107	/* we should hold wl->mutex */
108	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
109	if (ret < 0)
110		goto out;
111
112	if (enable)
113		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114	else
115		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
116out:
117	return ret;
118}
119
/*
 * This function is called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled.
 */
124int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
125{
126	int ret = 0;
127	int period = wl->conf.rx_streaming.interval;
128
129	/* don't reconfigure if rx_streaming is disabled */
130	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
131		goto out;
132
133	/* reconfigure/disable according to new streaming_period */
134	if (period &&
135	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
136	    (wl->conf.rx_streaming.always ||
137	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
138		ret = wl1271_set_rx_streaming(wl, wlvif, true);
139	else {
140		ret = wl1271_set_rx_streaming(wl, wlvif, false);
141		/* don't cancel_work_sync since we might deadlock */
142		del_timer_sync(&wlvif->rx_streaming_timer);
143	}
144out:
145	return ret;
146}
147
148static void wl1271_rx_streaming_enable_work(struct work_struct *work)
149{
150	int ret;
151	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
152						rx_streaming_enable_work);
153	struct wl1271 *wl = wlvif->wl;
154
155	mutex_lock(&wl->mutex);
156
157	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
158	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
159	    (!wl->conf.rx_streaming.always &&
160	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
161		goto out;
162
163	if (!wl->conf.rx_streaming.interval)
164		goto out;
165
166	ret = wl1271_ps_elp_wakeup(wl);
167	if (ret < 0)
168		goto out;
169
170	ret = wl1271_set_rx_streaming(wl, wlvif, true);
171	if (ret < 0)
172		goto out_sleep;
173
174	/* stop it after some time of inactivity */
175	mod_timer(&wlvif->rx_streaming_timer,
176		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
177
178out_sleep:
179	wl1271_ps_elp_sleep(wl);
180out:
181	mutex_unlock(&wl->mutex);
182}
183
184static void wl1271_rx_streaming_disable_work(struct work_struct *work)
185{
186	int ret;
187	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
188						rx_streaming_disable_work);
189	struct wl1271 *wl = wlvif->wl;
190
191	mutex_lock(&wl->mutex);
192
193	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
194		goto out;
195
196	ret = wl1271_ps_elp_wakeup(wl);
197	if (ret < 0)
198		goto out;
199
200	ret = wl1271_set_rx_streaming(wl, wlvif, false);
201	if (ret)
202		goto out_sleep;
203
204out_sleep:
205	wl1271_ps_elp_sleep(wl);
206out:
207	mutex_unlock(&wl->mutex);
208}
209
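/* timer callback: queue the work that disables rx_streaming after inactivity */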
210static void wl1271_rx_streaming_timer(unsigned long data)
211{
212	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
213	struct wl1271 *wl = wlvif->wl;
214	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
215}
216
217/* wl->mutex must be taken */
218void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219{
220	/* if the watchdog is not armed, don't do anything */
221	if (wl->tx_allocated_blocks == 0)
222		return;
223
224	cancel_delayed_work(&wl->tx_watchdog_work);
225	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
226		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
227}
228
229static void wl12xx_tx_watchdog_work(struct work_struct *work)
230{
231	struct delayed_work *dwork;
232	struct wl1271 *wl;
233
234	dwork = container_of(work, struct delayed_work, work);
235	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
236
237	mutex_lock(&wl->mutex);
238
239	if (unlikely(wl->state != WLCORE_STATE_ON))
240		goto out;
241
242	/* Tx went out in the meantime - everything is ok */
243	if (unlikely(wl->tx_allocated_blocks == 0))
244		goto out;
245
246	/*
247	 * if a ROC is in progress, we might not have any Tx for a long
248	 * time (e.g. pending Tx on the non-ROC channels)
249	 */
250	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
251		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
252			     wl->conf.tx.tx_watchdog_timeout);
253		wl12xx_rearm_tx_watchdog_locked(wl);
254		goto out;
255	}
256
257	/*
258	 * if a scan is in progress, we might not have any Tx for a long
259	 * time
260	 */
261	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
262		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
263			     wl->conf.tx.tx_watchdog_timeout);
264		wl12xx_rearm_tx_watchdog_locked(wl);
265		goto out;
266	}
267
	/*
	 * An AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck, we will hopefully discover it once all
	 * stations are removed due to inactivity.
	 */
274	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX,
			     "No Tx (in FW) for %d ms. AP has %d stations",
			     wl->conf.tx.tx_watchdog_timeout,
			     wl->active_sta_count);
279		wl12xx_rearm_tx_watchdog_locked(wl);
280		goto out;
281	}
282
283	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
284		     wl->conf.tx.tx_watchdog_timeout);
285	wl12xx_queue_recovery_work(wl);
286
287out:
288	mutex_unlock(&wl->mutex);
289}
290
291static void wlcore_adjust_conf(struct wl1271 *wl)
292{
293	/* Adjust settings according to optional module parameters */
294
295	/* Firmware Logger params */
296	if (fwlog_mem_blocks != -1) {
297		if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
298		    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
299			wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
300		} else {
301			wl1271_error(
302				"Illegal fwlog_mem_blocks=%d using default %d",
303				fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
304		}
305	}
306
307	if (fwlog_param) {
308		if (!strcmp(fwlog_param, "continuous")) {
309			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310		} else if (!strcmp(fwlog_param, "ondemand")) {
311			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
312		} else if (!strcmp(fwlog_param, "dbgpins")) {
313			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315		} else if (!strcmp(fwlog_param, "disable")) {
316			wl->conf.fwlog.mem_blocks = 0;
317			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318		} else {
319			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
320		}
321	}
322
323	if (bug_on_recovery != -1)
324		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325
326	if (no_recovery != -1)
327		wl->conf.recovery.no_recovery = (u8) no_recovery;
328}
329
330static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331					struct wl12xx_vif *wlvif,
332					u8 hlid, u8 tx_pkts)
333{
334	bool fw_ps;
335
336	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
337
	/*
	 * Wake up from high-level PS if the STA is asleep with too few
	 * packets in FW, or if the STA is awake.
	 */
342	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343		wl12xx_ps_link_end(wl, wlvif, hlid);
344
345	/*
346	 * Start high-level PS if the STA is asleep with enough blocks in FW.
347	 * Make an exception if this is the only connected link. In this
348	 * case FW-memory congestion is less of a problem.
349	 * Note that a single connected STA means 2*ap_count + 1 active links,
350	 * since we must account for the global and broadcast AP links
351	 * for each AP. The "fw_ps" check assures us the other link is a STA
352	 * connected to the AP. Otherwise the FW would not set the PSM bit.
353	 */
354	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
355		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356		wl12xx_ps_link_start(wl, wlvif, hlid, true);
357}
358
359static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360					   struct wl12xx_vif *wlvif,
361					   struct wl_fw_status *status)
362{
363	unsigned long cur_fw_ps_map;
364	u8 hlid;
365
366	cur_fw_ps_map = status->link_ps_bitmap;
367	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368		wl1271_debug(DEBUG_PSM,
369			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
370			     wl->ap_fw_ps_map, cur_fw_ps_map,
371			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
372
373		wl->ap_fw_ps_map = cur_fw_ps_map;
374	}
375
376	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
377		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378					    wl->links[hlid].allocated_pkts);
379}
380
381static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
382{
383	struct wl12xx_vif *wlvif;
384	struct timespec ts;
385	u32 old_tx_blk_count = wl->tx_blocks_available;
386	int avail, freed_blocks;
387	int i;
388	int ret;
389	struct wl1271_link *lnk;
390
391	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
392				   wl->raw_fw_status,
393				   wl->fw_status_len, false);
394	if (ret < 0)
395		return ret;
396
397	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
398
399	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
400		     "drv_rx_counter = %d, tx_results_counter = %d)",
401		     status->intr,
402		     status->fw_rx_counter,
403		     status->drv_rx_counter,
404		     status->tx_results_counter);
405
406	for (i = 0; i < NUM_TX_QUEUES; i++) {
407		/* prevent wrap-around in freed-packets counter */
408		wl->tx_allocated_pkts[i] -=
409				(status->counters.tx_released_pkts[i] -
410				wl->tx_pkts_freed[i]) & 0xff;
411
412		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
413	}
414
415
416	for_each_set_bit(i, wl->links_map, wl->num_links) {
417		u8 diff;
418		lnk = &wl->links[i];
419
420		/* prevent wrap-around in freed-packets counter */
421		diff = (status->counters.tx_lnk_free_pkts[i] -
422		       lnk->prev_freed_pkts) & 0xff;
423
424		if (diff == 0)
425			continue;
426
427		lnk->allocated_pkts -= diff;
428		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
429
430		/* accumulate the prev_freed_pkts counter */
431		lnk->total_freed_pkts += diff;
432	}
433
434	/* prevent wrap-around in total blocks counter */
435	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
436		freed_blocks = status->total_released_blks -
437			       wl->tx_blocks_freed;
438	else
439		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
440			       status->total_released_blks;
441
442	wl->tx_blocks_freed = status->total_released_blks;
443
444	wl->tx_allocated_blocks -= freed_blocks;
445
446	/*
447	 * If the FW freed some blocks:
448	 * If we still have allocated blocks - re-arm the timer, Tx is
449	 * not stuck. Otherwise, cancel the timer (no Tx currently).
450	 */
451	if (freed_blocks) {
452		if (wl->tx_allocated_blocks)
453			wl12xx_rearm_tx_watchdog_locked(wl);
454		else
455			cancel_delayed_work(&wl->tx_watchdog_work);
456	}
457
458	avail = status->tx_total - wl->tx_allocated_blocks;
459
460	/*
461	 * The FW might change the total number of TX memblocks before
462	 * we get a notification about blocks being released. Thus, the
463	 * available blocks calculation might yield a temporary result
464	 * which is lower than the actual available blocks. Keeping in
465	 * mind that only blocks that were allocated can be moved from
466	 * TX to RX, tx_blocks_available should never decrease here.
467	 */
468	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
469				      avail);
470
471	/* if more blocks are available now, tx work can be scheduled */
472	if (wl->tx_blocks_available > old_tx_blk_count)
473		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
474
	/* for AP, update num of allocated TX blocks per link and PS status */
476	wl12xx_for_each_wlvif_ap(wl, wlvif) {
477		wl12xx_irq_update_links_status(wl, wlvif, status);
478	}
479
480	/* update the host-chipset time offset */
481	getnstimeofday(&ts);
482	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
483		(s64)(status->fw_localtime);
484
485	wl->fw_fast_lnk_map = status->link_fast_bitmap;
486
487	return 0;
488}
489
490static void wl1271_flush_deferred_work(struct wl1271 *wl)
491{
492	struct sk_buff *skb;
493
494	/* Pass all received frames to the network stack */
495	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
496		ieee80211_rx_ni(wl->hw, skb);
497
498	/* Return sent skbs to the network stack */
499	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
500		ieee80211_tx_status_ni(wl->hw, skb);
501}
502
503static void wl1271_netstack_work(struct work_struct *work)
504{
505	struct wl1271 *wl =
506		container_of(work, struct wl1271, netstack_work);
507
508	do {
509		wl1271_flush_deferred_work(wl);
510	} while (skb_queue_len(&wl->deferred_rx_queue));
511}
512
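/* upper bound on FW status read iterations per threaded IRQ invocation */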
513#define WL1271_IRQ_MAX_LOOPS 256
514
515static int wlcore_irq_locked(struct wl1271 *wl)
516{
517	int ret = 0;
518	u32 intr;
519	int loopcount = WL1271_IRQ_MAX_LOOPS;
520	bool done = false;
521	unsigned int defer_count;
522	unsigned long flags;
523
	/*
	 * If an edge-triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
528	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
529		loopcount = 1;
530
531	wl1271_debug(DEBUG_IRQ, "IRQ work");
532
533	if (unlikely(wl->state != WLCORE_STATE_ON))
534		goto out;
535
536	ret = wl1271_ps_elp_wakeup(wl);
537	if (ret < 0)
538		goto out;
539
540	while (!done && loopcount--) {
541		/*
542		 * In order to avoid a race with the hardirq, clear the flag
543		 * before acknowledging the chip. Since the mutex is held,
544		 * wl1271_ps_elp_wakeup cannot be called concurrently.
545		 */
546		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
547		smp_mb__after_atomic();
548
549		ret = wlcore_fw_status(wl, wl->fw_status);
550		if (ret < 0)
551			goto out;
552
553		wlcore_hw_tx_immediate_compl(wl);
554
555		intr = wl->fw_status->intr;
556		intr &= WLCORE_ALL_INTR_MASK;
557		if (!intr) {
558			done = true;
559			continue;
560		}
561
562		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563			wl1271_error("HW watchdog interrupt received! starting recovery.");
564			wl->watchdog_recovery = true;
565			ret = -EIO;
566
567			/* restarting the chip. ignore any other interrupt. */
568			goto out;
569		}
570
571		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572			wl1271_error("SW watchdog interrupt received! "
573				     "starting recovery.");
574			wl->watchdog_recovery = true;
575			ret = -EIO;
576
577			/* restarting the chip. ignore any other interrupt. */
578			goto out;
579		}
580
581		if (likely(intr & WL1271_ACX_INTR_DATA)) {
582			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583
584			ret = wlcore_rx(wl, wl->fw_status);
585			if (ret < 0)
586				goto out;
587
588			/* Check if any tx blocks were freed */
589			spin_lock_irqsave(&wl->wl_lock, flags);
590			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
591			    wl1271_tx_total_queue_count(wl) > 0) {
592				spin_unlock_irqrestore(&wl->wl_lock, flags);
593				/*
594				 * In order to avoid starvation of the TX path,
595				 * call the work function directly.
596				 */
597				ret = wlcore_tx_work_locked(wl);
598				if (ret < 0)
599					goto out;
600			} else {
601				spin_unlock_irqrestore(&wl->wl_lock, flags);
602			}
603
604			/* check for tx results */
605			ret = wlcore_hw_tx_delayed_compl(wl);
606			if (ret < 0)
607				goto out;
608
609			/* Make sure the deferred queues don't get too long */
610			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
611				      skb_queue_len(&wl->deferred_rx_queue);
612			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
613				wl1271_flush_deferred_work(wl);
614		}
615
616		if (intr & WL1271_ACX_INTR_EVENT_A) {
617			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
618			ret = wl1271_event_handle(wl, 0);
619			if (ret < 0)
620				goto out;
621		}
622
623		if (intr & WL1271_ACX_INTR_EVENT_B) {
624			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
625			ret = wl1271_event_handle(wl, 1);
626			if (ret < 0)
627				goto out;
628		}
629
630		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
631			wl1271_debug(DEBUG_IRQ,
632				     "WL1271_ACX_INTR_INIT_COMPLETE");
633
634		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
635			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
636	}
637
638	wl1271_ps_elp_sleep(wl);
639
640out:
641	return ret;
642}
643
644static irqreturn_t wlcore_irq(int irq, void *cookie)
645{
646	int ret;
647	unsigned long flags;
648	struct wl1271 *wl = cookie;
649
	/* signal the pending ELP wakeup completion, if any */
651	spin_lock_irqsave(&wl->wl_lock, flags);
652	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
653	if (wl->elp_compl) {
654		complete(wl->elp_compl);
655		wl->elp_compl = NULL;
656	}
657
658	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
659		/* don't enqueue a work right now. mark it as pending */
660		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
661		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
662		disable_irq_nosync(wl->irq);
663		pm_wakeup_event(wl->dev, 0);
664		spin_unlock_irqrestore(&wl->wl_lock, flags);
665		return IRQ_HANDLED;
666	}
667	spin_unlock_irqrestore(&wl->wl_lock, flags);
668
669	/* TX might be handled here, avoid redundant work */
670	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
671	cancel_work_sync(&wl->tx_work);
672
673	mutex_lock(&wl->mutex);
674
675	ret = wlcore_irq_locked(wl);
676	if (ret)
677		wl12xx_queue_recovery_work(wl);
678
679	spin_lock_irqsave(&wl->wl_lock, flags);
680	/* In case TX was not handled here, queue TX work */
681	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
682	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
683	    wl1271_tx_total_queue_count(wl) > 0)
684		ieee80211_queue_work(wl->hw, &wl->tx_work);
685	spin_unlock_irqrestore(&wl->wl_lock, flags);
686
687	mutex_unlock(&wl->mutex);
688
689	return IRQ_HANDLED;
690}
691
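/*
 * Helper data for counting the active interfaces (and noting whether the
 * given vif is among them) via ieee80211_iterate_active_interfaces().
 */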
692struct vif_counter_data {
693	u8 counter;
694
695	struct ieee80211_vif *cur_vif;
696	bool cur_vif_running;
697};
698
699static void wl12xx_vif_count_iter(void *data, u8 *mac,
700				  struct ieee80211_vif *vif)
701{
702	struct vif_counter_data *counter = data;
703
704	counter->counter++;
705	if (counter->cur_vif == vif)
706		counter->cur_vif_running = true;
707}
708
709/* caller must not hold wl->mutex, as it might deadlock */
710static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
711			       struct ieee80211_vif *cur_vif,
712			       struct vif_counter_data *data)
713{
714	memset(data, 0, sizeof(*data));
715	data->cur_vif = cur_vif;
716
717	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
718					    wl12xx_vif_count_iter, data);
719}
720
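/*
 * Select the firmware image to use (PLT, multi-role or single-role) and
 * load it into wl->fw, unless an image of that type is already cached.
 */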
721static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
722{
723	const struct firmware *fw;
724	const char *fw_name;
725	enum wl12xx_fw_type fw_type;
726	int ret;
727
728	if (plt) {
729		fw_type = WL12XX_FW_TYPE_PLT;
730		fw_name = wl->plt_fw_name;
731	} else {
732		/*
733		 * we can't call wl12xx_get_vif_count() here because
734		 * wl->mutex is taken, so use the cached last_vif_count value
735		 */
736		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
737			fw_type = WL12XX_FW_TYPE_MULTI;
738			fw_name = wl->mr_fw_name;
739		} else {
740			fw_type = WL12XX_FW_TYPE_NORMAL;
741			fw_name = wl->sr_fw_name;
742		}
743	}
744
745	if (wl->fw_type == fw_type)
746		return 0;
747
748	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
749
750	ret = request_firmware(&fw, fw_name, wl->dev);
751
752	if (ret < 0) {
753		wl1271_error("could not get firmware %s: %d", fw_name, ret);
754		return ret;
755	}
756
757	if (fw->size % 4) {
		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
759			     fw->size);
760		ret = -EILSEQ;
761		goto out;
762	}
763
764	vfree(wl->fw);
765	wl->fw_type = WL12XX_FW_TYPE_NONE;
766	wl->fw_len = fw->size;
767	wl->fw = vmalloc(wl->fw_len);
768
769	if (!wl->fw) {
770		wl1271_error("could not allocate memory for the firmware");
771		ret = -ENOMEM;
772		goto out;
773	}
774
775	memcpy(wl->fw, fw->data, wl->fw_len);
776	ret = 0;
777	wl->fw_type = fw_type;
778out:
779	release_firmware(fw);
780
781	return ret;
782}
783
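/*
 * Schedule the recovery work: mark the driver as restarting, wake the chip
 * and disable interrupts before the work is actually queued.
 */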
784void wl12xx_queue_recovery_work(struct wl1271 *wl)
785{
786	/* Avoid a recursive recovery */
787	if (wl->state == WLCORE_STATE_ON) {
788		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
789				  &wl->flags));
790
791		wl->state = WLCORE_STATE_RESTARTING;
792		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
793		wl1271_ps_elp_wakeup(wl);
794		wlcore_disable_interrupts_nosync(wl);
795		ieee80211_queue_work(wl->hw, &wl->recovery_work);
796	}
797}
798
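/*
 * Append a chunk of firmware log data to the host-side fwlog buffer (capped
 * at one page in total) and return the number of bytes actually copied.
 */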
799size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
800{
801	size_t len;
802
803	/* Make sure we have enough room */
804	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
805
806	/* Fill the FW log file, consumed by the sysfs fwlog entry */
807	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
808	wl->fwlog_size += len;
809
810	return len;
811}
812
813static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
814{
815	struct wlcore_partition_set part, old_part;
816	u32 addr;
817	u32 offset;
818	u32 end_of_log;
819	u8 *block;
820	int ret;
821
822	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
823	    (wl->conf.fwlog.mem_blocks == 0))
824		return;
825
826	wl1271_info("Reading FW panic log");
827
828	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
829	if (!block)
830		return;
831
832	/*
833	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hung or if
835	 * dbgpins are used (due to some fw bug).
836	 */
837	if (wl1271_ps_elp_wakeup(wl))
838		goto out;
839	if (!wl->watchdog_recovery &&
840	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
841		wl12xx_cmd_stop_fwlog(wl);
842
843	/* Read the first memory block address */
844	ret = wlcore_fw_status(wl, wl->fw_status);
845	if (ret < 0)
846		goto out;
847
848	addr = wl->fw_status->log_start_addr;
849	if (!addr)
850		goto out;
851
852	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
853		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
854		end_of_log = wl->fwlog_end;
855	} else {
856		offset = sizeof(addr);
857		end_of_log = addr;
858	}
859
860	old_part = wl->curr_part;
861	memset(&part, 0, sizeof(part));
862
863	/* Traverse the memory blocks linked list */
864	do {
865		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
866		part.mem.size  = PAGE_SIZE;
867
868		ret = wlcore_set_partition(wl, &part);
869		if (ret < 0) {
870			wl1271_error("%s: set_partition start=0x%X size=%d",
871				__func__, part.mem.start, part.mem.size);
872			goto out;
873		}
874
875		memset(block, 0, wl->fw_mem_block_size);
876		ret = wlcore_read_hwaddr(wl, addr, block,
877					wl->fw_mem_block_size, false);
878
879		if (ret < 0)
880			goto out;
881
882		/*
883		 * Memory blocks are linked to one another. The first 4 bytes
884		 * of each memory block hold the hardware address of the next
	 * one. The last memory block points back to the first one in
	 * on-demand mode, and equals 0x2000000 in continuous mode.
887		 */
888		addr = le32_to_cpup((__le32 *)block);
889
890		if (!wl12xx_copy_fwlog(wl, block + offset,
891					wl->fw_mem_block_size - offset))
892			break;
893	} while (addr && (addr != end_of_log));
894
895	wake_up_interruptible(&wl->fwlog_waitq);
896
897out:
898	kfree(block);
899	wlcore_set_partition(wl, &old_part);
900}
901
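/*
 * Save the per-link freed-packets counter in the station's driver data, so
 * the TX sequence number state survives interface removal and recovery.
 */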
902static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
903				   u8 hlid, struct ieee80211_sta *sta)
904{
905	struct wl1271_station *wl_sta;
906	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
907
908	wl_sta = (void *)sta->drv_priv;
909	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
910
911	/*
912	 * increment the initial seq number on recovery to account for
913	 * transmitted packets that we haven't yet got in the FW status
914	 */
915	if (wlvif->encryption_type == KEY_GEM)
916		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
917
918	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
919		wl_sta->total_freed_pkts += sqn_recovery_padding;
920}
921
922static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
923					struct wl12xx_vif *wlvif,
924					u8 hlid, const u8 *addr)
925{
926	struct ieee80211_sta *sta;
927	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
928
929	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
930		    is_zero_ether_addr(addr)))
931		return;
932
933	rcu_read_lock();
934	sta = ieee80211_find_sta(vif, addr);
935	if (sta)
936		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
937	rcu_read_unlock();
938}
939
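/* dump the FW program counter and interrupt status to help debug the recovery */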
940static void wlcore_print_recovery(struct wl1271 *wl)
941{
942	u32 pc = 0;
943	u32 hint_sts = 0;
944	int ret;
945
946	wl1271_info("Hardware recovery in progress. FW ver: %s",
947		    wl->chip.fw_ver_str);
948
949	/* change partitions momentarily so we can read the FW pc */
950	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
951	if (ret < 0)
952		return;
953
954	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
955	if (ret < 0)
956		return;
957
958	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
959	if (ret < 0)
960		return;
961
962	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
963				pc, hint_sts, ++wl->recovery_count);
964
965	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
966}
967
968
969static void wl1271_recovery_work(struct work_struct *work)
970{
971	struct wl1271 *wl =
972		container_of(work, struct wl1271, recovery_work);
973	struct wl12xx_vif *wlvif;
974	struct ieee80211_vif *vif;
975
976	mutex_lock(&wl->mutex);
977
978	if (wl->state == WLCORE_STATE_OFF || wl->plt)
979		goto out_unlock;
980
981	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
982		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
983			wl12xx_read_fwlog_panic(wl);
984		wlcore_print_recovery(wl);
985	}
986
987	BUG_ON(wl->conf.recovery.bug_on_recovery &&
988	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
989
990	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). FW will remain stuck.");
992		goto out_unlock;
993	}
994
995	/* Prevent spurious TX during FW restart */
996	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
997
998	/* reboot the chipset */
999	while (!list_empty(&wl->wlvif_list)) {
1000		wlvif = list_first_entry(&wl->wlvif_list,
1001				       struct wl12xx_vif, list);
1002		vif = wl12xx_wlvif_to_vif(wlvif);
1003
1004		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1005		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1006			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1007						    vif->bss_conf.bssid);
1008		}
1009
1010		__wl1271_op_remove_interface(wl, vif, false);
1011	}
1012
1013	wlcore_op_stop_locked(wl);
1014
1015	ieee80211_restart_hw(wl->hw);
1016
1017	/*
	 * It's safe to enable TX now - the queues are stopped after a request
1019	 * to restart the HW.
1020	 */
1021	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1022
1023out_unlock:
1024	wl->watchdog_recovery = false;
1025	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1026	mutex_unlock(&wl->mutex);
1027}
1028
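/* wake the FW from ELP by writing the wake-up bit to the ELP control register */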
1029static int wlcore_fw_wakeup(struct wl1271 *wl)
1030{
1031	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1032}
1033
1034static int wl1271_setup(struct wl1271 *wl)
1035{
1036	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1037	if (!wl->raw_fw_status)
1038		goto err;
1039
1040	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1041	if (!wl->fw_status)
1042		goto err;
1043
1044	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1045	if (!wl->tx_res_if)
1046		goto err;
1047
1048	return 0;
1049err:
1050	kfree(wl->fw_status);
1051	kfree(wl->raw_fw_status);
1052	return -ENOMEM;
1053}
1054
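/*
 * Power the chip on, reset and re-initialize the I/O layer, then switch to
 * the boot partition and wake the FW core so booting can proceed.
 */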
1055static int wl12xx_set_power_on(struct wl1271 *wl)
1056{
1057	int ret;
1058
1059	msleep(WL1271_PRE_POWER_ON_SLEEP);
1060	ret = wl1271_power_on(wl);
1061	if (ret < 0)
1062		goto out;
1063	msleep(WL1271_POWER_ON_SLEEP);
1064	wl1271_io_reset(wl);
1065	wl1271_io_init(wl);
1066
1067	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1068	if (ret < 0)
1069		goto fail;
1070
1071	/* ELP module wake up */
1072	ret = wlcore_fw_wakeup(wl);
1073	if (ret < 0)
1074		goto fail;
1075
1076out:
1077	return ret;
1078
1079fail:
1080	wl1271_power_off(wl);
1081	return ret;
1082}
1083
1084static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1085{
1086	int ret = 0;
1087
1088	ret = wl12xx_set_power_on(wl);
1089	if (ret < 0)
1090		goto out;
1091
1092	/*
1093	 * For wl127x based devices we could use the default block
1094	 * size (512 bytes), but due to a bug in the sdio driver, we
1095	 * need to set it explicitly after the chip is powered on.  To
1096	 * simplify the code and since the performance impact is
1097	 * negligible, we use the same block size for all different
1098	 * chip types.
1099	 *
1100	 * Check if the bus supports blocksize alignment and, if it
1101	 * doesn't, make sure we don't have the quirk.
1102	 */
1103	if (!wl1271_set_block_size(wl))
1104		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1105
1106	/* TODO: make sure the lower driver has set things up correctly */
1107
1108	ret = wl1271_setup(wl);
1109	if (ret < 0)
1110		goto out;
1111
1112	ret = wl12xx_fetch_firmware(wl, plt);
1113	if (ret < 0)
1114		goto out;
1115
1116out:
1117	return ret;
1118}
1119
1120int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1121{
1122	int retries = WL1271_BOOT_RETRIES;
1123	struct wiphy *wiphy = wl->hw->wiphy;
1124
1125	static const char* const PLT_MODE[] = {
1126		"PLT_OFF",
1127		"PLT_ON",
1128		"PLT_FEM_DETECT",
1129		"PLT_CHIP_AWAKE"
1130	};
1131
1132	int ret;
1133
1134	mutex_lock(&wl->mutex);
1135
1136	wl1271_notice("power up");
1137
1138	if (wl->state != WLCORE_STATE_OFF) {
1139		wl1271_error("cannot go into PLT state because not "
1140			     "in off state: %d", wl->state);
1141		ret = -EBUSY;
1142		goto out;
1143	}
1144
1145	/* Indicate to lower levels that we are now in PLT mode */
1146	wl->plt = true;
1147	wl->plt_mode = plt_mode;
1148
1149	while (retries) {
1150		retries--;
1151		ret = wl12xx_chip_wakeup(wl, true);
1152		if (ret < 0)
1153			goto power_off;
1154
1155		if (plt_mode != PLT_CHIP_AWAKE) {
1156			ret = wl->ops->plt_init(wl);
1157			if (ret < 0)
1158				goto power_off;
1159		}
1160
1161		wl->state = WLCORE_STATE_ON;
1162		wl1271_notice("firmware booted in PLT mode %s (%s)",
1163			      PLT_MODE[plt_mode],
1164			      wl->chip.fw_ver_str);
1165
1166		/* update hw/fw version info in wiphy struct */
1167		wiphy->hw_version = wl->chip.id;
1168		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1169			sizeof(wiphy->fw_version));
1170
1171		goto out;
1172
1173power_off:
1174		wl1271_power_off(wl);
1175	}
1176
1177	wl->plt = false;
1178	wl->plt_mode = PLT_OFF;
1179
1180	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1181		     WL1271_BOOT_RETRIES);
1182out:
1183	mutex_unlock(&wl->mutex);
1184
1185	return ret;
1186}
1187
1188int wl1271_plt_stop(struct wl1271 *wl)
1189{
1190	int ret = 0;
1191
1192	wl1271_notice("power down");
1193
1194	/*
1195	 * Interrupts must be disabled before setting the state to OFF.
1196	 * Otherwise, the interrupt handler might be called and exit without
1197	 * reading the interrupt status.
1198	 */
1199	wlcore_disable_interrupts(wl);
1200	mutex_lock(&wl->mutex);
1201	if (!wl->plt) {
1202		mutex_unlock(&wl->mutex);
1203
1204		/*
1205		 * This will not necessarily enable interrupts as interrupts
1206		 * may have been disabled when op_stop was called. It will,
1207		 * however, balance the above call to disable_interrupts().
1208		 */
1209		wlcore_enable_interrupts(wl);
1210
1211		wl1271_error("cannot power down because not in PLT "
1212			     "state: %d", wl->state);
1213		ret = -EBUSY;
1214		goto out;
1215	}
1216
1217	mutex_unlock(&wl->mutex);
1218
1219	wl1271_flush_deferred_work(wl);
1220	cancel_work_sync(&wl->netstack_work);
1221	cancel_work_sync(&wl->recovery_work);
1222	cancel_delayed_work_sync(&wl->elp_work);
1223	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1224
1225	mutex_lock(&wl->mutex);
1226	wl1271_power_off(wl);
1227	wl->flags = 0;
1228	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1229	wl->state = WLCORE_STATE_OFF;
1230	wl->plt = false;
1231	wl->plt_mode = PLT_OFF;
1232	wl->rx_counter = 0;
1233	mutex_unlock(&wl->mutex);
1234
1235out:
1236	return ret;
1237}
1238
1239static void wl1271_op_tx(struct ieee80211_hw *hw,
1240			 struct ieee80211_tx_control *control,
1241			 struct sk_buff *skb)
1242{
1243	struct wl1271 *wl = hw->priv;
1244	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1245	struct ieee80211_vif *vif = info->control.vif;
1246	struct wl12xx_vif *wlvif = NULL;
1247	unsigned long flags;
1248	int q, mapping;
1249	u8 hlid;
1250
1251	if (!vif) {
1252		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1253		ieee80211_free_txskb(hw, skb);
1254		return;
1255	}
1256
1257	wlvif = wl12xx_vif_to_data(vif);
1258	mapping = skb_get_queue_mapping(skb);
1259	q = wl1271_tx_get_queue(mapping);
1260
1261	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1262
1263	spin_lock_irqsave(&wl->wl_lock, flags);
1264
1265	/*
1266	 * drop the packet if the link is invalid or the queue is stopped
1267	 * for any reason but watermark. Watermark is a "soft"-stop so we
1268	 * allow these packets through.
1269	 */
1270	if (hlid == WL12XX_INVALID_LINK_ID ||
1271	    (!test_bit(hlid, wlvif->links_map)) ||
1272	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1273	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1274			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1275		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1276		ieee80211_free_txskb(hw, skb);
1277		goto out;
1278	}
1279
1280	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1281		     hlid, q, skb->len);
1282	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1283
1284	wl->tx_queue_count[q]++;
1285	wlvif->tx_queue_count[q]++;
1286
1287	/*
	 * The workqueue is slow to process the tx_queue, so we need to stop
1289	 * the queue here, otherwise the queue will get too long.
1290	 */
1291	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1292	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1293					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1294		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1295		wlcore_stop_queue_locked(wl, wlvif, q,
1296					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1297	}
1298
	/*
	 * The chip-specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */
1303
1304	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1305	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1306		ieee80211_queue_work(wl->hw, &wl->tx_work);
1307
1308out:
1309	spin_unlock_irqrestore(&wl->wl_lock, flags);
1310}
1311
1312int wl1271_tx_dummy_packet(struct wl1271 *wl)
1313{
1314	unsigned long flags;
1315	int q;
1316
1317	/* no need to queue a new dummy packet if one is already pending */
1318	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1319		return 0;
1320
1321	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1322
1323	spin_lock_irqsave(&wl->wl_lock, flags);
1324	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1325	wl->tx_queue_count[q]++;
1326	spin_unlock_irqrestore(&wl->wl_lock, flags);
1327
1328	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1329	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1330		return wlcore_tx_work_locked(wl);
1331
1332	/*
1333	 * If the FW TX is busy, TX work will be scheduled by the threaded
1334	 * interrupt handler function
1335	 */
1336	return 0;
1337}
1338
/*
 * The size of the dummy packet should be at least 1400 bytes. However, in
 * order to minimize the number of bus transactions, aligning it to 512-byte
 * boundaries could be beneficial, performance-wise.
 */
1344#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1345
1346static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1347{
1348	struct sk_buff *skb;
1349	struct ieee80211_hdr_3addr *hdr;
1350	unsigned int dummy_packet_size;
1351
1352	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1353			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1354
1355	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1356	if (!skb) {
1357		wl1271_warning("Failed to allocate a dummy packet skb");
1358		return NULL;
1359	}
1360
1361	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1362
1363	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1364	memset(hdr, 0, sizeof(*hdr));
1365	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1366					 IEEE80211_STYPE_NULLFUNC |
1367					 IEEE80211_FCTL_TODS);
1368
1369	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1370
1371	/* Dummy packets require the TID to be management */
1372	skb->priority = WL1271_TID_MGMT;
1373
1374	/* Initialize all fields that might be used */
1375	skb_set_queue_mapping(skb, 0);
1376	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1377
1378	return skb;
1379}
1380
1381
1382#ifdef CONFIG_PM
1383static int
1384wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1385{
1386	int num_fields = 0, in_field = 0, fields_size = 0;
1387	int i, pattern_len = 0;
1388
1389	if (!p->mask) {
1390		wl1271_warning("No mask in WoWLAN pattern");
1391		return -EINVAL;
1392	}
1393
1394	/*
1395	 * The pattern is broken up into segments of bytes at different offsets
1396	 * that need to be checked by the FW filter. Each segment is called
1397	 * a field in the FW API. We verify that the total number of fields
1398	 * required for this pattern won't exceed FW limits (8)
1399	 * as well as the total fields buffer won't exceed the FW limit.
1400	 * Note that if there's a pattern which crosses Ethernet/IP header
1401	 * boundary a new field is required.
1402	 */
1403	for (i = 0; i < p->pattern_len; i++) {
1404		if (test_bit(i, (unsigned long *)p->mask)) {
1405			if (!in_field) {
1406				in_field = 1;
1407				pattern_len = 1;
1408			} else {
1409				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1410					num_fields++;
1411					fields_size += pattern_len +
1412						RX_FILTER_FIELD_OVERHEAD;
1413					pattern_len = 1;
1414				} else
1415					pattern_len++;
1416			}
1417		} else {
1418			if (in_field) {
1419				in_field = 0;
1420				fields_size += pattern_len +
1421					RX_FILTER_FIELD_OVERHEAD;
1422				num_fields++;
1423			}
1424		}
1425	}
1426
1427	if (in_field) {
1428		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1429		num_fields++;
1430	}
1431
1432	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1433		wl1271_warning("RX Filter too complex. Too many segments");
1434		return -EINVAL;
1435	}
1436
1437	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1438		wl1271_warning("RX filter pattern is too big");
1439		return -E2BIG;
1440	}
1441
1442	return 0;
1443}
1444
1445struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1446{
1447	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1448}
1449
1450void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1451{
1452	int i;
1453
1454	if (filter == NULL)
1455		return;
1456
1457	for (i = 0; i < filter->num_fields; i++)
1458		kfree(filter->fields[i].pattern);
1459
1460	kfree(filter);
1461}
1462
1463int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1464				 u16 offset, u8 flags,
1465				 const u8 *pattern, u8 len)
1466{
1467	struct wl12xx_rx_filter_field *field;
1468
1469	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("Max fields per RX filter reached. Can't alloc another");
1471		return -EINVAL;
1472	}
1473
1474	field = &filter->fields[filter->num_fields];
1475
1476	field->pattern = kzalloc(len, GFP_KERNEL);
1477	if (!field->pattern) {
1478		wl1271_warning("Failed to allocate RX filter pattern");
1479		return -ENOMEM;
1480	}
1481
1482	filter->num_fields++;
1483
1484	field->offset = cpu_to_le16(offset);
1485	field->flags = flags;
1486	field->len = len;
1487	memcpy(field->pattern, pattern, len);
1488
1489	return 0;
1490}
1491
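/*
 * Size of all fields when flattened: per field, the header without the
 * pattern pointer plus the pattern bytes themselves.
 */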
1492int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1493{
1494	int i, fields_size = 0;
1495
1496	for (i = 0; i < filter->num_fields; i++)
1497		fields_size += filter->fields[i].len +
1498			sizeof(struct wl12xx_rx_filter_field) -
1499			sizeof(u8 *);
1500
1501	return fields_size;
1502}
1503
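/*
 * Serialize the filter fields into a flat buffer: each field header (without
 * the pattern pointer) is immediately followed by its pattern bytes.
 */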
1504void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1505				    u8 *buf)
1506{
1507	int i;
1508	struct wl12xx_rx_filter_field *field;
1509
1510	for (i = 0; i < filter->num_fields; i++) {
1511		field = (struct wl12xx_rx_filter_field *)buf;
1512
1513		field->offset = filter->fields[i].offset;
1514		field->flags = filter->fields[i].flags;
1515		field->len = filter->fields[i].len;
1516
1517		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1518		buf += sizeof(struct wl12xx_rx_filter_field) -
1519			sizeof(u8 *) + field->len;
1520	}
1521}
1522
/*
 * Allocates an RX filter, returned through f,
 * which needs to be freed using wl1271_rx_filter_free()
 */
1527static int
1528wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1529					   struct wl12xx_rx_filter **f)
1530{
1531	int i, j, ret = 0;
1532	struct wl12xx_rx_filter *filter;
1533	u16 offset;
1534	u8 flags, len;
1535
1536	filter = wl1271_rx_filter_alloc();
1537	if (!filter) {
1538		wl1271_warning("Failed to alloc rx filter");
1539		ret = -ENOMEM;
1540		goto err;
1541	}
1542
1543	i = 0;
1544	while (i < p->pattern_len) {
1545		if (!test_bit(i, (unsigned long *)p->mask)) {
1546			i++;
1547			continue;
1548		}
1549
1550		for (j = i; j < p->pattern_len; j++) {
1551			if (!test_bit(j, (unsigned long *)p->mask))
1552				break;
1553
1554			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1555			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1556				break;
1557		}
1558
1559		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1560			offset = i;
1561			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1562		} else {
1563			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1564			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1565		}
1566
1567		len = j - i;
1568
1569		ret = wl1271_rx_filter_alloc_field(filter,
1570						   offset,
1571						   flags,
1572						   &p->pattern[i], len);
1573		if (ret)
1574			goto err;
1575
1576		i = j;
1577	}
1578
1579	filter->action = FILTER_SIGNAL;
1580
1581	*f = filter;
1582	return 0;
1583
1584err:
1585	wl1271_rx_filter_free(filter);
1586	*f = NULL;
1587
1588	return ret;
1589}
1590
1591static int wl1271_configure_wowlan(struct wl1271 *wl,
1592				   struct cfg80211_wowlan *wow)
1593{
1594	int i, ret;
1595
1596	if (!wow || wow->any || !wow->n_patterns) {
1597		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1598							  FILTER_SIGNAL);
1599		if (ret)
1600			goto out;
1601
1602		ret = wl1271_rx_filter_clear_all(wl);
1603		if (ret)
1604			goto out;
1605
1606		return 0;
1607	}
1608
1609	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1610		return -EINVAL;
1611
1612	/* Validate all incoming patterns before clearing current FW state */
1613	for (i = 0; i < wow->n_patterns; i++) {
1614		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1615		if (ret) {
1616			wl1271_warning("Bad wowlan pattern %d", i);
1617			return ret;
1618		}
1619	}
1620
1621	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1622	if (ret)
1623		goto out;
1624
1625	ret = wl1271_rx_filter_clear_all(wl);
1626	if (ret)
1627		goto out;
1628
1629	/* Translate WoWLAN patterns into filters */
1630	for (i = 0; i < wow->n_patterns; i++) {
1631		struct cfg80211_pkt_pattern *p;
1632		struct wl12xx_rx_filter *filter = NULL;
1633
1634		p = &wow->patterns[i];
1635
1636		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1637		if (ret) {
1638			wl1271_warning("Failed to create an RX filter from "
1639				       "wowlan pattern %d", i);
1640			goto out;
1641		}
1642
1643		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1644
1645		wl1271_rx_filter_free(filter);
1646		if (ret)
1647			goto out;
1648	}
1649
1650	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1651
1652out:
1653	return ret;
1654}
1655
1656static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1657					struct wl12xx_vif *wlvif,
1658					struct cfg80211_wowlan *wow)
1659{
1660	int ret = 0;
1661
1662	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1663		goto out;
1664
1665	ret = wl1271_ps_elp_wakeup(wl);
1666	if (ret < 0)
1667		goto out;
1668
1669	ret = wl1271_configure_wowlan(wl, wow);
1670	if (ret < 0)
1671		goto out_sleep;
1672
1673	if ((wl->conf.conn.suspend_wake_up_event ==
1674	     wl->conf.conn.wake_up_event) &&
1675	    (wl->conf.conn.suspend_listen_interval ==
1676	     wl->conf.conn.listen_interval))
1677		goto out_sleep;
1678
1679	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1680				    wl->conf.conn.suspend_wake_up_event,
1681				    wl->conf.conn.suspend_listen_interval);
1682
1683	if (ret < 0)
1684		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1685
1686out_sleep:
1687	wl1271_ps_elp_sleep(wl);
1688out:
1689	return ret;
1690
1691}
1692
1693static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1694				       struct wl12xx_vif *wlvif)
1695{
1696	int ret = 0;
1697
1698	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1699		goto out;
1700
1701	ret = wl1271_ps_elp_wakeup(wl);
1702	if (ret < 0)
1703		goto out;
1704
1705	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1706
1707	wl1271_ps_elp_sleep(wl);
1708out:
1709	return ret;
1710
1711}
1712
1713static int wl1271_configure_suspend(struct wl1271 *wl,
1714				    struct wl12xx_vif *wlvif,
1715				    struct cfg80211_wowlan *wow)
1716{
1717	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1718		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1719	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1720		return wl1271_configure_suspend_ap(wl, wlvif);
1721	return 0;
1722}
1723
1724static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1725{
1726	int ret = 0;
1727	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1728	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1729
1730	if ((!is_ap) && (!is_sta))
1731		return;
1732
1733	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1734		return;
1735
1736	ret = wl1271_ps_elp_wakeup(wl);
1737	if (ret < 0)
1738		return;
1739
1740	if (is_sta) {
1741		wl1271_configure_wowlan(wl, NULL);
1742
1743		if ((wl->conf.conn.suspend_wake_up_event ==
1744		     wl->conf.conn.wake_up_event) &&
1745		    (wl->conf.conn.suspend_listen_interval ==
1746		     wl->conf.conn.listen_interval))
1747			goto out_sleep;
1748
1749		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1750				    wl->conf.conn.wake_up_event,
1751				    wl->conf.conn.listen_interval);
1752
1753		if (ret < 0)
1754			wl1271_error("resume: wake up conditions failed: %d",
1755				     ret);
1756
1757	} else if (is_ap) {
1758		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1759	}
1760
1761out_sleep:
1762	wl1271_ps_elp_sleep(wl);
1763}
1764
1765static int wl1271_op_suspend(struct ieee80211_hw *hw,
1766			    struct cfg80211_wowlan *wow)
1767{
1768	struct wl1271 *wl = hw->priv;
1769	struct wl12xx_vif *wlvif;
1770	int ret;
1771
1772	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1773	WARN_ON(!wow);
1774
1775	/* we want to perform the recovery before suspending */
1776	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1777		wl1271_warning("postponing suspend to perform recovery");
1778		return -EBUSY;
1779	}
1780
1781	wl1271_tx_flush(wl);
1782
1783	mutex_lock(&wl->mutex);
1784	wl->wow_enabled = true;
1785	wl12xx_for_each_wlvif(wl, wlvif) {
1786		ret = wl1271_configure_suspend(wl, wlvif, wow);
1787		if (ret < 0) {
1788			mutex_unlock(&wl->mutex);
1789			wl1271_warning("couldn't prepare device to suspend");
1790			return ret;
1791		}
1792	}
1793	mutex_unlock(&wl->mutex);
1794	/* flush any remaining work */
1795	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1796
1797	/*
1798	 * disable and re-enable interrupts in order to flush
1799	 * the threaded_irq
1800	 */
1801	wlcore_disable_interrupts(wl);
1802
1803	/*
1804	 * set suspended flag to avoid triggering a new threaded_irq
1805	 * work. no need for spinlock as interrupts are disabled.
1806	 */
1807	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1808
1809	wlcore_enable_interrupts(wl);
1810	flush_work(&wl->tx_work);
1811	flush_delayed_work(&wl->elp_work);
1812
1813	/*
	 * Cancel the watchdog even if the above tx_flush failed. We will detect
1815	 * it on resume anyway.
1816	 */
1817	cancel_delayed_work(&wl->tx_watchdog_work);
1818
1819	return 0;
1820}
1821
1822static int wl1271_op_resume(struct ieee80211_hw *hw)
1823{
1824	struct wl1271 *wl = hw->priv;
1825	struct wl12xx_vif *wlvif;
1826	unsigned long flags;
1827	bool run_irq_work = false, pending_recovery;
1828	int ret;
1829
1830	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1831		     wl->wow_enabled);
1832	WARN_ON(!wl->wow_enabled);
1833
1834	/*
1835	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is pending work.
1837	 */
1838	spin_lock_irqsave(&wl->wl_lock, flags);
1839	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1840	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1841		run_irq_work = true;
1842	spin_unlock_irqrestore(&wl->wl_lock, flags);
1843
1844	mutex_lock(&wl->mutex);
1845
1846	/* test the recovery flag before calling any SDIO functions */
1847	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1848				    &wl->flags);
1849
1850	if (run_irq_work) {
1851		wl1271_debug(DEBUG_MAC80211,
1852			     "run postponed irq_work directly");
1853
1854		/* don't talk to the HW if recovery is pending */
1855		if (!pending_recovery) {
1856			ret = wlcore_irq_locked(wl);
1857			if (ret)
1858				wl12xx_queue_recovery_work(wl);
1859		}
1860
1861		wlcore_enable_interrupts(wl);
1862	}
1863
1864	if (pending_recovery) {
1865		wl1271_warning("queuing forgotten recovery on resume");
1866		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1867		goto out;
1868	}
1869
1870	wl12xx_for_each_wlvif(wl, wlvif) {
1871		wl1271_configure_resume(wl, wlvif);
1872	}
1873
1874out:
1875	wl->wow_enabled = false;
1876
1877	/*
1878	 * Set a flag to re-init the watchdog on the first Tx after resume.
1879	 * That way we avoid possible conditions where Tx-complete interrupts
1880	 * fail to arrive and we perform a spurious recovery.
1881	 */
1882	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1883	mutex_unlock(&wl->mutex);
1884
1885	return 0;
1886}
1887#endif
1888
1889static int wl1271_op_start(struct ieee80211_hw *hw)
1890{
1891	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1892
1893	/*
1894	 * We have to delay the booting of the hardware because
1895	 * we need to know the local MAC address before downloading and
1896	 * initializing the firmware. The MAC address cannot be changed
1897	 * after boot, and without the proper MAC address, the firmware
1898	 * will not function properly.
1899	 *
1900	 * The MAC address is first known when the corresponding interface
1901	 * is added. That is where we will initialize the hardware.
1902	 */
1903
1904	return 0;
1905}
1906
1907static void wlcore_op_stop_locked(struct wl1271 *wl)
1908{
1909	int i;
1910
1911	if (wl->state == WLCORE_STATE_OFF) {
1912		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1913					&wl->flags))
1914			wlcore_enable_interrupts(wl);
1915
1916		return;
1917	}
1918
1919	/*
1920	 * this must be before the cancel_work calls below, so that the work
1921	 * functions don't perform further work.
1922	 */
1923	wl->state = WLCORE_STATE_OFF;
1924
1925	/*
1926	 * Use the nosync variant to disable interrupts, so the mutex could be
1927	 * held while doing so without deadlocking.
1928	 */
1929	wlcore_disable_interrupts_nosync(wl);
1930
1931	mutex_unlock(&wl->mutex);
1932
1933	wlcore_synchronize_interrupts(wl);
1934	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1935		cancel_work_sync(&wl->recovery_work);
1936	wl1271_flush_deferred_work(wl);
1937	cancel_delayed_work_sync(&wl->scan_complete_work);
1938	cancel_work_sync(&wl->netstack_work);
1939	cancel_work_sync(&wl->tx_work);
1940	cancel_delayed_work_sync(&wl->elp_work);
1941	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1942
1943	/* let's notify MAC80211 about the remaining pending TX frames */
1944	mutex_lock(&wl->mutex);
1945	wl12xx_tx_reset(wl);
1946
1947	wl1271_power_off(wl);
1948	/*
1949	 * In case a recovery was scheduled, interrupts were disabled to avoid
1950	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth.
1952	 */
1953	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1954		wlcore_enable_interrupts(wl);
1955
1956	wl->band = IEEE80211_BAND_2GHZ;
1957
1958	wl->rx_counter = 0;
1959	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1960	wl->channel_type = NL80211_CHAN_NO_HT;
1961	wl->tx_blocks_available = 0;
1962	wl->tx_allocated_blocks = 0;
1963	wl->tx_results_count = 0;
1964	wl->tx_packets_count = 0;
1965	wl->time_offset = 0;
1966	wl->ap_fw_ps_map = 0;
1967	wl->ap_ps_map = 0;
1968	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1969	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1970	memset(wl->links_map, 0, sizeof(wl->links_map));
1971	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1972	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1973	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1974	wl->active_sta_count = 0;
1975	wl->active_link_count = 0;
1976
1977	/* The system link is always allocated */
1978	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1979	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1980	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1981
1982	/*
1983	 * this is performed after the cancel_work calls and the associated
1984	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1985	 * get executed before all these vars have been reset.
1986	 */
1987	wl->flags = 0;
1988
1989	wl->tx_blocks_freed = 0;
1990
1991	for (i = 0; i < NUM_TX_QUEUES; i++) {
1992		wl->tx_pkts_freed[i] = 0;
1993		wl->tx_allocated_pkts[i] = 0;
1994	}
1995
1996	wl1271_debugfs_reset(wl);
1997
1998	kfree(wl->raw_fw_status);
1999	wl->raw_fw_status = NULL;
2000	kfree(wl->fw_status);
2001	wl->fw_status = NULL;
2002	kfree(wl->tx_res_if);
2003	wl->tx_res_if = NULL;
2004	kfree(wl->target_mem_map);
2005	wl->target_mem_map = NULL;
2006
2007	/*
2008	 * FW channels must be re-calibrated after recovery,
2009	 * save current Reg-Domain channel configuration and clear it.
2010	 */
2011	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2012	       sizeof(wl->reg_ch_conf_pending));
2013	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2014}
2015
2016static void wlcore_op_stop(struct ieee80211_hw *hw)
2017{
2018	struct wl1271 *wl = hw->priv;
2019
2020	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2021
2022	mutex_lock(&wl->mutex);
2023
2024	wlcore_op_stop_locked(wl);
2025
2026	mutex_unlock(&wl->mutex);
2027}
2028
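/*
 * Delayed work that fires if a firmware channel switch does not complete
 * in time: it reports the failed switch to mac80211 and asks the firmware
 * to stop the ongoing channel switch.
 */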
2029static void wlcore_channel_switch_work(struct work_struct *work)
2030{
2031	struct delayed_work *dwork;
2032	struct wl1271 *wl;
2033	struct ieee80211_vif *vif;
2034	struct wl12xx_vif *wlvif;
2035	int ret;
2036
2037	dwork = container_of(work, struct delayed_work, work);
2038	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2039	wl = wlvif->wl;
2040
2041	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2042
2043	mutex_lock(&wl->mutex);
2044
2045	if (unlikely(wl->state != WLCORE_STATE_ON))
2046		goto out;
2047
2048	/* check the channel switch is still ongoing */
2049	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2050		goto out;
2051
2052	vif = wl12xx_wlvif_to_vif(wlvif);
2053	ieee80211_chswitch_done(vif, false);
2054
2055	ret = wl1271_ps_elp_wakeup(wl);
2056	if (ret < 0)
2057		goto out;
2058
2059	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2060
2061	wl1271_ps_elp_sleep(wl);
2062out:
2063	mutex_unlock(&wl->mutex);
2064}
2065
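/*
 * Delayed work that reports a connection loss to mac80211, provided the
 * vif is still marked as associated when the work runs.
 */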
2066static void wlcore_connection_loss_work(struct work_struct *work)
2067{
2068	struct delayed_work *dwork;
2069	struct wl1271 *wl;
2070	struct ieee80211_vif *vif;
2071	struct wl12xx_vif *wlvif;
2072
2073	dwork = container_of(work, struct delayed_work, work);
2074	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2075	wl = wlvif->wl;
2076
2077	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2078
2079	mutex_lock(&wl->mutex);
2080
2081	if (unlikely(wl->state != WLCORE_STATE_ON))
2082		goto out;
2083
2084	/* Call mac80211 connection loss */
2085	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2086		goto out;
2087
2088	vif = wl12xx_wlvif_to_vif(wlvif);
2089	ieee80211_connection_loss(vif);
2090out:
2091	mutex_unlock(&wl->mutex);
2092}
2093
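/*
 * Delayed work that runs once the pending-auth timeout has elapsed; if no
 * newer auth reply pushed the deadline forward, it cancels the ROC kept
 * for the authenticating station via wlcore_update_inconn_sta().
 */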
2094static void wlcore_pending_auth_complete_work(struct work_struct *work)
2095{
2096	struct delayed_work *dwork;
2097	struct wl1271 *wl;
2098	struct wl12xx_vif *wlvif;
2099	unsigned long time_spare;
2100	int ret;
2101
2102	dwork = container_of(work, struct delayed_work, work);
2103	wlvif = container_of(dwork, struct wl12xx_vif,
2104			     pending_auth_complete_work);
2105	wl = wlvif->wl;
2106
2107	mutex_lock(&wl->mutex);
2108
2109	if (unlikely(wl->state != WLCORE_STATE_ON))
2110		goto out;
2111
2112	/*
2113	 * Make sure a second really passed since the last auth reply. Maybe
2114	 * a second auth reply arrived while we were stuck on the mutex.
2115	 * Check for a little less than the timeout to protect from scheduler
2116	 * irregularities.
2117	 */
2118	time_spare = jiffies +
2119			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2120	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2121		goto out;
2122
2123	ret = wl1271_ps_elp_wakeup(wl);
2124	if (ret < 0)
2125		goto out;
2126
2127	/* cancel the ROC if active */
2128	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2129
2130	wl1271_ps_elp_sleep(wl);
2131out:
2132	mutex_unlock(&wl->mutex);
2133}
2134
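/*
 * Rate policies and keep-alive (KLV) templates are limited firmware
 * resources, tracked here with simple bitmaps: allocation grabs the first
 * free index (or fails with -EBUSY), freeing clears the bit and sets the
 * index to an invalid value.
 */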
2135static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2136{
2137	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2138					WL12XX_MAX_RATE_POLICIES);
2139	if (policy >= WL12XX_MAX_RATE_POLICIES)
2140		return -EBUSY;
2141
2142	__set_bit(policy, wl->rate_policies_map);
2143	*idx = policy;
2144	return 0;
2145}
2146
2147static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2148{
2149	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2150		return;
2151
2152	__clear_bit(*idx, wl->rate_policies_map);
2153	*idx = WL12XX_MAX_RATE_POLICIES;
2154}
2155
2156static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2157{
2158	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2159					WLCORE_MAX_KLV_TEMPLATES);
2160	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2161		return -EBUSY;
2162
2163	__set_bit(policy, wl->klv_templates_map);
2164	*idx = policy;
2165	return 0;
2166}
2167
2168static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2169{
2170	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2171		return;
2172
2173	__clear_bit(*idx, wl->klv_templates_map);
2174	*idx = WLCORE_MAX_KLV_TEMPLATES;
2175}
2176
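/*
 * Map the vif's BSS type (and p2p flag) to the corresponding firmware role
 * type, or WL12XX_INVALID_ROLE_TYPE if the BSS type is unknown.
 */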
2177static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2178{
2179	switch (wlvif->bss_type) {
2180	case BSS_TYPE_AP_BSS:
2181		if (wlvif->p2p)
2182			return WL1271_ROLE_P2P_GO;
2183		else
2184			return WL1271_ROLE_AP;
2185
2186	case BSS_TYPE_STA_BSS:
2187		if (wlvif->p2p)
2188			return WL1271_ROLE_P2P_CL;
2189		else
2190			return WL1271_ROLE_STA;
2191
2192	case BSS_TYPE_IBSS:
2193		return WL1271_ROLE_IBSS;
2194
2195	default:
2196		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2197	}
2198	return WL12XX_INVALID_ROLE_TYPE;
2199}
2200
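/*
 * Reset the per-vif driver state: derive the BSS type from the interface
 * type, allocate rate policies (plus a KLV template for STA/IBSS), copy the
 * global mac80211 settings into the vif, and set up its works and timer.
 */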
2201static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2202{
2203	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2204	int i;
2205
2206	/* clear everything but the persistent data */
2207	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2208
2209	switch (ieee80211_vif_type_p2p(vif)) {
2210	case NL80211_IFTYPE_P2P_CLIENT:
2211		wlvif->p2p = 1;
2212		/* fall-through */
2213	case NL80211_IFTYPE_STATION:
2214		wlvif->bss_type = BSS_TYPE_STA_BSS;
2215		break;
2216	case NL80211_IFTYPE_ADHOC:
2217		wlvif->bss_type = BSS_TYPE_IBSS;
2218		break;
2219	case NL80211_IFTYPE_P2P_GO:
2220		wlvif->p2p = 1;
2221		/* fall-through */
2222	case NL80211_IFTYPE_AP:
2223		wlvif->bss_type = BSS_TYPE_AP_BSS;
2224		break;
2225	default:
2226		wlvif->bss_type = MAX_BSS_TYPE;
2227		return -EOPNOTSUPP;
2228	}
2229
2230	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2231	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2232	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2233
2234	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2235	    wlvif->bss_type == BSS_TYPE_IBSS) {
2236		/* init sta/ibss data */
2237		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2238		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2239		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2240		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2241		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2242		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2243		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2244		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2245	} else {
2246		/* init ap data */
2247		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2248		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2249		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2250		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2251		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2252			wl12xx_allocate_rate_policy(wl,
2253						&wlvif->ap.ucast_rate_idx[i]);
2254		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2255		/*
2256		 * TODO: check if basic_rate shouldn't be
2257		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2258		 * instead (the same thing for STA above).
2259		 */
2260		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2261		/* TODO: this seems to be used only for STA, check it */
2262		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2263	}
2264
2265	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2266	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2267	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2268
2269	/*
2270	 * mac80211 configures some values globally, while we treat them
2271	 * per-interface. Thus, on init, we have to copy them from wl.
2272	 */
2273	wlvif->band = wl->band;
2274	wlvif->channel = wl->channel;
2275	wlvif->power_level = wl->power_level;
2276	wlvif->channel_type = wl->channel_type;
2277
2278	INIT_WORK(&wlvif->rx_streaming_enable_work,
2279		  wl1271_rx_streaming_enable_work);
2280	INIT_WORK(&wlvif->rx_streaming_disable_work,
2281		  wl1271_rx_streaming_disable_work);
2282	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2283			  wlcore_channel_switch_work);
2284	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2285			  wlcore_connection_loss_work);
2286	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2287			  wlcore_pending_auth_complete_work);
2288	INIT_LIST_HEAD(&wlvif->list);
2289
2290	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2291		    (unsigned long) wlvif);
2292	return 0;
2293}
2294
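/*
 * Power on the chip and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times. On success the hw/fw version info is exported
 * through wiphy and the core state is set to WLCORE_STATE_ON.
 */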
2295static int wl12xx_init_fw(struct wl1271 *wl)
2296{
2297	int retries = WL1271_BOOT_RETRIES;
2298	bool booted = false;
2299	struct wiphy *wiphy = wl->hw->wiphy;
2300	int ret;
2301
2302	while (retries) {
2303		retries--;
2304		ret = wl12xx_chip_wakeup(wl, false);
2305		if (ret < 0)
2306			goto power_off;
2307
2308		ret = wl->ops->boot(wl);
2309		if (ret < 0)
2310			goto power_off;
2311
2312		ret = wl1271_hw_init(wl);
2313		if (ret < 0)
2314			goto irq_disable;
2315
2316		booted = true;
2317		break;
2318
2319irq_disable:
2320		mutex_unlock(&wl->mutex);
2321		/* Unlocking the mutex in the middle of handling is
2322		   inherently unsafe. In this case we deem it safe to do,
2323		   because we need to let any possibly pending IRQ out of
2324		   the system (and while we are WLCORE_STATE_OFF the IRQ
2325		   work function will not do anything.) Also, any other
2326		   possible concurrent operations will fail due to the
2327		   current state, hence the wl1271 struct should be safe. */
2328		wlcore_disable_interrupts(wl);
2329		wl1271_flush_deferred_work(wl);
2330		cancel_work_sync(&wl->netstack_work);
2331		mutex_lock(&wl->mutex);
2332power_off:
2333		wl1271_power_off(wl);
2334	}
2335
2336	if (!booted) {
2337		wl1271_error("firmware boot failed despite %d retries",
2338			     WL1271_BOOT_RETRIES);
2339		goto out;
2340	}
2341
2342	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2343
2344	/* update hw/fw version info in wiphy struct */
2345	wiphy->hw_version = wl->chip.id;
2346	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2347		sizeof(wiphy->fw_version));
2348
2349	/*
2350	 * Now we know if 11a is supported (info from the NVS), so disable
2351	 * 11a channels if not supported
2352	 */
2353	if (!wl->enable_11a)
2354		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2355
2356	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2357		     wl->enable_11a ? "" : "not ");
2358
2359	wl->state = WLCORE_STATE_ON;
2360out:
2361	return ret;
2362}
2363
2364static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2365{
2366	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2367}
2368
2369/*
2370 * Check whether a fw switch (i.e. moving from one loaded
2371 * fw to another) is needed. This function is also responsible
2372 * for updating wl->last_vif_count, so it must be called before
2373 * loading a non-plt fw, so that the correct fw (single-role or
2374 * multi-role) will be used.
2375 */
2376static bool wl12xx_need_fw_change(struct wl1271 *wl,
2377				  struct vif_counter_data vif_counter_data,
2378				  bool add)
2379{
2380	enum wl12xx_fw_type current_fw = wl->fw_type;
2381	u8 vif_count = vif_counter_data.counter;
2382
2383	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2384		return false;
2385
2386	/* increase the vif count if this is a new vif */
2387	if (add && !vif_counter_data.cur_vif_running)
2388		vif_count++;
2389
2390	wl->last_vif_count = vif_count;
2391
2392	/* no need for fw change if the device is OFF */
2393	if (wl->state == WLCORE_STATE_OFF)
2394		return false;
2395
2396	/* no need for fw change if a single fw is used */
2397	if (!wl->mr_fw_name)
2398		return false;
2399
2400	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2401		return true;
2402	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2403		return true;
2404
2405	return false;
2406}
2407
2408/*
2409 * Enter "forced psm". Make sure the sta is in psm against the ap,
2410 * to make the fw switch a bit more disconnection-persistent.
2411 */
2412static void wl12xx_force_active_psm(struct wl1271 *wl)
2413{
2414	struct wl12xx_vif *wlvif;
2415
2416	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2417		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2418	}
2419}
2420
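/*
 * Each vif owns a block of NUM_TX_QUEUES consecutive mac80211 hw queues.
 * The iterator below marks the queue blocks already taken by other running
 * vifs, so that wlcore_allocate_hw_queue_base() can pick a free block.
 */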
2421struct wlcore_hw_queue_iter_data {
2422	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2423	/* current vif */
2424	struct ieee80211_vif *vif;
2425	/* is the current vif among those iterated */
2426	bool cur_running;
2427};
2428
2429static void wlcore_hw_queue_iter(void *data, u8 *mac,
2430				 struct ieee80211_vif *vif)
2431{
2432	struct wlcore_hw_queue_iter_data *iter_data = data;
2433
2434	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2435		return;
2436
2437	if (iter_data->cur_running || vif == iter_data->vif) {
2438		iter_data->cur_running = true;
2439		return;
2440	}
2441
2442	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2443}
2444
2445static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2446					 struct wl12xx_vif *wlvif)
2447{
2448	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2449	struct wlcore_hw_queue_iter_data iter_data = {};
2450	int i, q_base;
2451
2452	iter_data.vif = vif;
2453
2454	/* mark all bits taken by active interfaces */
2455	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2456					IEEE80211_IFACE_ITER_RESUME_ALL,
2457					wlcore_hw_queue_iter, &iter_data);
2458
2459	/* the current vif is already running in mac80211 (resume/recovery) */
2460	if (iter_data.cur_running) {
2461		wlvif->hw_queue_base = vif->hw_queue[0];
2462		wl1271_debug(DEBUG_MAC80211,
2463			     "using pre-allocated hw queue base %d",
2464			     wlvif->hw_queue_base);
2465
2466		/* the interface type might have changed */
2467		goto adjust_cab_queue;
2468	}
2469
2470	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2471				     WLCORE_NUM_MAC_ADDRESSES);
2472	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2473		return -EBUSY;
2474
2475	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2476	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2477		     wlvif->hw_queue_base);
2478
2479	for (i = 0; i < NUM_TX_QUEUES; i++) {
2480		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2481		/* register hw queues in mac80211 */
2482		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2483	}
2484
2485adjust_cab_queue:
2486	/* the last places are reserved for cab queues per interface */
2487	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2488		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2489				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2490	else
2491		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2492
2493	return 0;
2494}
2495
2496static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2497				   struct ieee80211_vif *vif)
2498{
2499	struct wl1271 *wl = hw->priv;
2500	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2501	struct vif_counter_data vif_count;
2502	int ret = 0;
2503	u8 role_type;
2504
2505	if (wl->plt) {
2506		wl1271_error("Adding Interface not allowed while in PLT mode");
2507		return -EBUSY;
2508	}
2509
2510	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2511			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2512
2513	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2514		     ieee80211_vif_type_p2p(vif), vif->addr);
2515
2516	wl12xx_get_vif_count(hw, vif, &vif_count);
2517
2518	mutex_lock(&wl->mutex);
2519	ret = wl1271_ps_elp_wakeup(wl);
2520	if (ret < 0)
2521		goto out_unlock;
2522
2523	/*
2524	 * In some corner-case HW recovery scenarios it's possible to
2525	 * get here before __wl1271_op_remove_interface is complete, so
2526	 * opt out if that is the case.
2527	 */
2528	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2529	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2530		ret = -EBUSY;
2531		goto out;
2532	}
2533
2535	ret = wl12xx_init_vif_data(wl, vif);
2536	if (ret < 0)
2537		goto out;
2538
2539	wlvif->wl = wl;
2540	role_type = wl12xx_get_role_type(wl, wlvif);
2541	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2542		ret = -EINVAL;
2543		goto out;
2544	}
2545
2546	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2547	if (ret < 0)
2548		goto out;
2549
2550	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2551		wl12xx_force_active_psm(wl);
2552		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2553		mutex_unlock(&wl->mutex);
2554		wl1271_recovery_work(&wl->recovery_work);
2555		return 0;
2556	}
2557
2558	/*
2559	 * TODO: once the nvs issue is solved, move this block
2560	 * to start(), and make sure the driver is ON here.
2561	 */
2562	if (wl->state == WLCORE_STATE_OFF) {
2563		/*
2564		 * we still need this in order to configure the fw
2565		 * while uploading the nvs
2566		 */
2567		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2568
2569		ret = wl12xx_init_fw(wl);
2570		if (ret < 0)
2571			goto out;
2572	}
2573
2574	ret = wl12xx_cmd_role_enable(wl, vif->addr,
2575				     role_type, &wlvif->role_id);
2576	if (ret < 0)
2577		goto out;
2578
2579	ret = wl1271_init_vif_specific(wl, vif);
2580	if (ret < 0)
2581		goto out;
2582
2583	list_add(&wlvif->list, &wl->wlvif_list);
2584	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2585
2586	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2587		wl->ap_count++;
2588	else
2589		wl->sta_count++;
2590out:
2591	wl1271_ps_elp_sleep(wl);
2592out_unlock:
2593	mutex_unlock(&wl->mutex);
2594
2595	return ret;
2596}
2597
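/*
 * Tear down a vif: stop any scan or ROC tied to it, disable its firmware
 * role (unless a recovery is in progress), release its rate policies, KLV
 * template or recorded AP keys, clear its hlids, and cancel its works with
 * wl->mutex temporarily dropped.
 */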
2598static void __wl1271_op_remove_interface(struct wl1271 *wl,
2599					 struct ieee80211_vif *vif,
2600					 bool reset_tx_queues)
2601{
2602	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2603	int i, ret;
2604	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2605
2606	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2607
2608	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2609		return;
2610
2611	/* because of hardware recovery, we may get here twice */
2612	if (wl->state == WLCORE_STATE_OFF)
2613		return;
2614
2615	wl1271_info("down");
2616
2617	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2618	    wl->scan_wlvif == wlvif) {
2619		/*
2620		 * Rearm the tx watchdog just before idling scan. This
2621		 * prevents just-finished scans from triggering the watchdog
2622		 */
2623		wl12xx_rearm_tx_watchdog_locked(wl);
2624
2625		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2626		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2627		wl->scan_wlvif = NULL;
2628		wl->scan.req = NULL;
2629		ieee80211_scan_completed(wl->hw, true);
2630	}
2631
2632	if (wl->sched_vif == wlvif)
2633		wl->sched_vif = NULL;
2634
2635	if (wl->roc_vif == vif) {
2636		wl->roc_vif = NULL;
2637		ieee80211_remain_on_channel_expired(wl->hw);
2638	}
2639
2640	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2641		/* disable active roles */
2642		ret = wl1271_ps_elp_wakeup(wl);
2643		if (ret < 0)
2644			goto deinit;
2645
2646		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2647		    wlvif->bss_type == BSS_TYPE_IBSS) {
2648			if (wl12xx_dev_role_started(wlvif))
2649				wl12xx_stop_dev(wl, wlvif);
2650		}
2651
2652		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2653		if (ret < 0)
2654			goto deinit;
2655
2656		wl1271_ps_elp_sleep(wl);
2657	}
2658deinit:
2659	wl12xx_tx_reset_wlvif(wl, wlvif);
2660
2661	/* clear all hlids (except system_hlid) */
2662	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2663
2664	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2665	    wlvif->bss_type == BSS_TYPE_IBSS) {
2666		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2667		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2668		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2669		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2670		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2671	} else {
2672		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2673		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2674		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2675		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2676		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2677			wl12xx_free_rate_policy(wl,
2678						&wlvif->ap.ucast_rate_idx[i]);
2679		wl1271_free_ap_keys(wl, wlvif);
2680	}
2681
2682	dev_kfree_skb(wlvif->probereq);
2683	wlvif->probereq = NULL;
2684	if (wl->last_wlvif == wlvif)
2685		wl->last_wlvif = NULL;
2686	list_del(&wlvif->list);
2687	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2688	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2689	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2690
2691	if (is_ap)
2692		wl->ap_count--;
2693	else
2694		wl->sta_count--;
2695
2696	/*
2697	 * If this was the last AP and stations remain, configure sleep auth
2698	 * according to the STA. Don't do this on unintended recovery.
2699	 */
2700	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2701	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2702		goto unlock;
2703
2704	if (wl->ap_count == 0 && is_ap) {
2705		/* mask ap events */
2706		wl->event_mask &= ~wl->ap_event_mask;
2707		wl1271_event_unmask(wl);
2708	}
2709
2710	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2711		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2712		/* Configure for power according to debugfs */
2713		if (sta_auth != WL1271_PSM_ILLEGAL)
2714			wl1271_acx_sleep_auth(wl, sta_auth);
2715		/* Configure for ELP power saving */
2716		else
2717			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2718	}
2719
2720unlock:
2721	mutex_unlock(&wl->mutex);
2722
2723	del_timer_sync(&wlvif->rx_streaming_timer);
2724	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2725	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2726	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2727	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2728	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2729
2730	mutex_lock(&wl->mutex);
2731}
2732
2733static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2734				       struct ieee80211_vif *vif)
2735{
2736	struct wl1271 *wl = hw->priv;
2737	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2738	struct wl12xx_vif *iter;
2739	struct vif_counter_data vif_count;
2740
2741	wl12xx_get_vif_count(hw, vif, &vif_count);
2742	mutex_lock(&wl->mutex);
2743
2744	if (wl->state == WLCORE_STATE_OFF ||
2745	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2746		goto out;
2747
2748	/*
2749	 * wl->vif can be null here if someone shuts down the interface
2750	 * just when hardware recovery has been started.
2751	 */
2752	wl12xx_for_each_wlvif(wl, iter) {
2753		if (iter != wlvif)
2754			continue;
2755
2756		__wl1271_op_remove_interface(wl, vif, true);
2757		break;
2758	}
2759	WARN_ON(iter != wlvif);
2760	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2761		wl12xx_force_active_psm(wl);
2762		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2763		wl12xx_queue_recovery_work(wl);
2764	}
2765out:
2766	mutex_unlock(&wl->mutex);
2767}
2768
2769static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2770				      struct ieee80211_vif *vif,
2771				      enum nl80211_iftype new_type, bool p2p)
2772{
2773	struct wl1271 *wl = hw->priv;
2774	int ret;
2775
2776	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2777	wl1271_op_remove_interface(hw, vif);
2778
2779	vif->type = new_type;
2780	vif->p2p = p2p;
2781	ret = wl1271_op_add_interface(hw, vif);
2782
2783	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2784	return ret;
2785}
2786
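/*
 * Start the STA or IBSS role in the firmware (the firmware-level "join").
 * See the note below about JOIN clearing WPA/WPA2 keys in the chipset.
 */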
2787static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2788{
2789	int ret;
2790	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2791
2792	/*
2793	 * One of the side effects of the JOIN command is that it clears
2794	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2795	 * to a WPA/WPA2 access point will therefore kill the data-path.
2796	 * Currently the only valid scenario for JOIN during association
2797	 * is on roaming, in which case we will also be given new keys.
2798	 * Keep the below message for now, unless it starts bothering
2799	 * users who really like to roam a lot :)
2800	 */
2801	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2802		wl1271_info("JOIN while associated.");
2803
2804	/* clear encryption type */
2805	wlvif->encryption_type = KEY_NONE;
2806
2807	if (is_ibss)
2808		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2809	else {
2810		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2811			/*
2812			 * TODO: this is an ugly workaround for wl12xx fw
2813			 * bug - we are not able to tx/rx after the first
2814			 * start_sta, so make dummy start+stop calls,
2815			 * and then call start_sta again.
2816			 * this should be fixed in the fw.
2817			 */
2818			wl12xx_cmd_role_start_sta(wl, wlvif);
2819			wl12xx_cmd_role_stop_sta(wl, wlvif);
2820		}
2821
2822		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2823	}
2824
2825	return ret;
2826}
2827
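/*
 * Extract the SSID IE from a management frame (starting at the given IE
 * offset) and cache its contents in the vif.
 */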
2828static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2829			    int offset)
2830{
2831	u8 ssid_len;
2832	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2833					 skb->len - offset);
2834
2835	if (!ptr) {
2836		wl1271_error("No SSID in IEs!");
2837		return -ENOENT;
2838	}
2839
2840	ssid_len = ptr[1];
2841	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2842		wl1271_error("SSID is too long!");
2843		return -EINVAL;
2844	}
2845
2846	wlvif->ssid_len = ssid_len;
2847	memcpy(wlvif->ssid, ptr+2, ssid_len);
2848	return 0;
2849}
2850
2851static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2852{
2853	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2854	struct sk_buff *skb;
2855	int ieoffset;
2856
2857	/* we currently only support setting the ssid from the ap probe req */
2858	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2859		return -EINVAL;
2860
2861	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2862	if (!skb)
2863		return -EINVAL;
2864
2865	ieoffset = offsetof(struct ieee80211_mgmt,
2866			    u.probe_req.variable);
2867	wl1271_ssid_set(wlvif, skb, ieoffset);
2868	dev_kfree_skb(skb);
2869
2870	return 0;
2871}
2872
2873static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2874			    struct ieee80211_bss_conf *bss_conf,
2875			    u32 sta_rate_set)
2876{
2877	int ieoffset;
2878	int ret;
2879
2880	wlvif->aid = bss_conf->aid;
2881	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2882	wlvif->beacon_int = bss_conf->beacon_int;
2883	wlvif->wmm_enabled = bss_conf->qos;
2884
2885	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2886
2887	/*
2888	 * With wl1271, we don't need to update the
2889	 * beacon_int and dtim_period, because the firmware
2890	 * updates them by itself when the first beacon is
2891	 * received after a join.
2892	 */
2893	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2894	if (ret < 0)
2895		return ret;
2896
2897	/*
2898	 * Get a template for hardware connection maintenance
2899	 */
2900	dev_kfree_skb(wlvif->probereq);
2901	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2902							wlvif,
2903							NULL);
2904	ieoffset = offsetof(struct ieee80211_mgmt,
2905			    u.probe_req.variable);
2906	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2907
2908	/* enable the connection monitoring feature */
2909	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2910	if (ret < 0)
2911		return ret;
2912
2913	/*
2914	 * The join command disables keep-alive mode, shuts down its process,
2915	 * and also clears the template config, so we need to reset it all
2916	 * after the join. The acx_aid starts the keep-alive process, and the
2917	 * order of the commands below is relevant.
2918	 */
2919	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2920	if (ret < 0)
2921		return ret;
2922
2923	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2924	if (ret < 0)
2925		return ret;
2926
2927	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2928	if (ret < 0)
2929		return ret;
2930
2931	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2932					   wlvif->sta.klv_template_id,
2933					   ACX_KEEP_ALIVE_TPL_VALID);
2934	if (ret < 0)
2935		return ret;
2936
2937	/*
2938	 * The default fw psm configuration is AUTO, while mac80211 default
2939	 * setting is off (ACTIVE), so sync the fw with the correct value.
2940	 */
2941	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2942	if (ret < 0)
2943		return ret;
2944
2945	if (sta_rate_set) {
2946		wlvif->rate_set =
2947			wl1271_tx_enabled_rates_get(wl,
2948						    sta_rate_set,
2949						    wlvif->band);
2950		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2951		if (ret < 0)
2952			return ret;
2953	}
2954
2955	return ret;
2956}
2957
2958static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2959{
2960	int ret;
2961	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2962
2963	/* make sure we are connected (sta) joined */
2964	if (sta &&
2965	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2966		return false;
2967
2968	/* make sure we are joined (ibss) */
2969	if (!sta &&
2970	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2971		return false;
2972
2973	if (sta) {
2974		/* use defaults when not associated */
2975		wlvif->aid = 0;
2976
2977		/* free probe-request template */
2978		dev_kfree_skb(wlvif->probereq);
2979		wlvif->probereq = NULL;
2980
2981		/* disable connection monitor features */
2982		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2983		if (ret < 0)
2984			return ret;
2985
2986		/* Disable the keep-alive feature */
2987		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2988		if (ret < 0)
2989			return ret;
2990
2991		/* disable beacon filtering */
2992		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
2993		if (ret < 0)
2994			return ret;
2995	}
2996
2997	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2998		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2999
3000		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3001		ieee80211_chswitch_done(vif, false);
3002		cancel_delayed_work(&wlvif->channel_switch_work);
3003	}
3004
3005	/* invalidate keep-alive template */
3006	wl1271_acx_keep_alive_config(wl, wlvif,
3007				     wlvif->sta.klv_template_id,
3008				     ACX_KEEP_ALIVE_TPL_INVALID);
3009
3010	return 0;
3011}
3012
3013static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3014{
3015	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3016	wlvif->rate_set = wlvif->basic_rate_set;
3017}
3018
3019static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3020				   bool idle)
3021{
3022	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3023
3024	if (idle == cur_idle)
3025		return;
3026
3027	if (idle) {
3028		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3029	} else {
3030		/* The current firmware only supports sched_scan in idle */
3031		if (wl->sched_vif == wlvif)
3032			wl->ops->sched_scan_stop(wl, wlvif);
3033
3034		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3035	}
3036}
3037
3038static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3039			     struct ieee80211_conf *conf, u32 changed)
3040{
3041	int ret;
3042
3043	if (conf->power_level != wlvif->power_level) {
3044		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3045		if (ret < 0)
3046			return ret;
3047
3048		wlvif->power_level = conf->power_level;
3049	}
3050
3051	return 0;
3052}
3053
3054static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3055{
3056	struct wl1271 *wl = hw->priv;
3057	struct wl12xx_vif *wlvif;
3058	struct ieee80211_conf *conf = &hw->conf;
3059	int ret = 0;
3060
3061	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3062		     " changed 0x%x",
3063		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3064		     conf->power_level,
3065		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3066			 changed);
3067
3068	mutex_lock(&wl->mutex);
3069
3070	if (changed & IEEE80211_CONF_CHANGE_POWER)
3071		wl->power_level = conf->power_level;
3072
3073	if (unlikely(wl->state != WLCORE_STATE_ON))
3074		goto out;
3075
3076	ret = wl1271_ps_elp_wakeup(wl);
3077	if (ret < 0)
3078		goto out;
3079
3080	/* configure each interface */
3081	wl12xx_for_each_wlvif(wl, wlvif) {
3082		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3083		if (ret < 0)
3084			goto out_sleep;
3085	}
3086
3087out_sleep:
3088	wl1271_ps_elp_sleep(wl);
3089
3090out:
3091	mutex_unlock(&wl->mutex);
3092
3093	return ret;
3094}
3095
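/*
 * prepare_multicast runs in atomic context and just packages the multicast
 * list into a wl1271_filter_params struct; configure_filter later programs
 * it into the firmware group-address table for non-AP vifs (or disables the
 * table when the list exceeds ACX_MC_ADDRESS_GROUP_MAX entries).
 */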
3096struct wl1271_filter_params {
3097	bool enabled;
3098	int mc_list_length;
3099	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3100};
3101
3102static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3103				       struct netdev_hw_addr_list *mc_list)
3104{
3105	struct wl1271_filter_params *fp;
3106	struct netdev_hw_addr *ha;
3107
3108	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3109	if (!fp) {
3110		wl1271_error("Out of memory setting filters.");
3111		return 0;
3112	}
3113
3114	/* update multicast filtering parameters */
3115	fp->mc_list_length = 0;
3116	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3117		fp->enabled = false;
3118	} else {
3119		fp->enabled = true;
3120		netdev_hw_addr_list_for_each(ha, mc_list) {
3121			memcpy(fp->mc_list[fp->mc_list_length],
3122					ha->addr, ETH_ALEN);
3123			fp->mc_list_length++;
3124		}
3125	}
3126
3127	return (u64)(unsigned long)fp;
3128}
3129
3130#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3131				  FIF_ALLMULTI | \
3132				  FIF_FCSFAIL | \
3133				  FIF_BCN_PRBRESP_PROMISC | \
3134				  FIF_CONTROL | \
3135				  FIF_OTHER_BSS)
3136
3137static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3138				       unsigned int changed,
3139				       unsigned int *total, u64 multicast)
3140{
3141	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3142	struct wl1271 *wl = hw->priv;
3143	struct wl12xx_vif *wlvif;
3144
3145	int ret;
3146
3147	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3148		     " total %x", changed, *total);
3149
3150	mutex_lock(&wl->mutex);
3151
3152	*total &= WL1271_SUPPORTED_FILTERS;
3153	changed &= WL1271_SUPPORTED_FILTERS;
3154
3155	if (unlikely(wl->state != WLCORE_STATE_ON))
3156		goto out;
3157
3158	ret = wl1271_ps_elp_wakeup(wl);
3159	if (ret < 0)
3160		goto out;
3161
3162	wl12xx_for_each_wlvif(wl, wlvif) {
3163		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3164			if (*total & FIF_ALLMULTI)
3165				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3166								   false,
3167								   NULL, 0);
3168			else if (fp)
3169				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3170							fp->enabled,
3171							fp->mc_list,
3172							fp->mc_list_length);
3173			if (ret < 0)
3174				goto out_sleep;
3175		}
3176	}
3177
3178	/*
3179	 * The fw doesn't provide an API to configure the filters. Instead,
3180	 * the filter configuration is based on the active roles / ROC
3181	 * state.
3182	 */
3183
3184out_sleep:
3185	wl1271_ps_elp_sleep(wl);
3186
3187out:
3188	mutex_unlock(&wl->mutex);
3189	kfree(fp);
3190}
3191
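/*
 * Keys set by mac80211 before the AP role is started cannot be programmed
 * into the firmware yet; they are recorded here and later programmed in one
 * shot by wl1271_ap_init_hwenc().
 */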
3192static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3193				u8 id, u8 key_type, u8 key_size,
3194				const u8 *key, u8 hlid, u32 tx_seq_32,
3195				u16 tx_seq_16)
3196{
3197	struct wl1271_ap_key *ap_key;
3198	int i;
3199
3200	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3201
3202	if (key_size > MAX_KEY_SIZE)
3203		return -EINVAL;
3204
3205	/*
3206	 * Find next free entry in ap_keys. Also check we are not replacing
3207	 * an existing key.
3208	 */
3209	for (i = 0; i < MAX_NUM_KEYS; i++) {
3210		if (wlvif->ap.recorded_keys[i] == NULL)
3211			break;
3212
3213		if (wlvif->ap.recorded_keys[i]->id == id) {
3214			wl1271_warning("trying to record key replacement");
3215			return -EINVAL;
3216		}
3217	}
3218
3219	if (i == MAX_NUM_KEYS)
3220		return -EBUSY;
3221
3222	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3223	if (!ap_key)
3224		return -ENOMEM;
3225
3226	ap_key->id = id;
3227	ap_key->key_type = key_type;
3228	ap_key->key_size = key_size;
3229	memcpy(ap_key->key, key, key_size);
3230	ap_key->hlid = hlid;
3231	ap_key->tx_seq_32 = tx_seq_32;
3232	ap_key->tx_seq_16 = tx_seq_16;
3233
3234	wlvif->ap.recorded_keys[i] = ap_key;
3235	return 0;
3236}
3237
3238static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3239{
3240	int i;
3241
3242	for (i = 0; i < MAX_NUM_KEYS; i++) {
3243		kfree(wlvif->ap.recorded_keys[i]);
3244		wlvif->ap.recorded_keys[i] = NULL;
3245	}
3246}
3247
3248static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3249{
3250	int i, ret = 0;
3251	struct wl1271_ap_key *key;
3252	bool wep_key_added = false;
3253
3254	for (i = 0; i < MAX_NUM_KEYS; i++) {
3255		u8 hlid;
3256		if (wlvif->ap.recorded_keys[i] == NULL)
3257			break;
3258
3259		key = wlvif->ap.recorded_keys[i];
3260		hlid = key->hlid;
3261		if (hlid == WL12XX_INVALID_LINK_ID)
3262			hlid = wlvif->ap.bcast_hlid;
3263
3264		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3265					    key->id, key->key_type,
3266					    key->key_size, key->key,
3267					    hlid, key->tx_seq_32,
3268					    key->tx_seq_16);
3269		if (ret < 0)
3270			goto out;
3271
3272		if (key->key_type == KEY_WEP)
3273			wep_key_added = true;
3274	}
3275
3276	if (wep_key_added) {
3277		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3278						     wlvif->ap.bcast_hlid);
3279		if (ret < 0)
3280			goto out;
3281	}
3282
3283out:
3284	wl1271_free_ap_keys(wl, wlvif);
3285	return ret;
3286}
3287
3288static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3289		       u16 action, u8 id, u8 key_type,
3290		       u8 key_size, const u8 *key, u32 tx_seq_32,
3291		       u16 tx_seq_16, struct ieee80211_sta *sta)
3292{
3293	int ret;
3294	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3295
3296	if (is_ap) {
3297		struct wl1271_station *wl_sta;
3298		u8 hlid;
3299
3300		if (sta) {
3301			wl_sta = (struct wl1271_station *)sta->drv_priv;
3302			hlid = wl_sta->hlid;
3303		} else {
3304			hlid = wlvif->ap.bcast_hlid;
3305		}
3306
3307		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3308			/*
3309			 * We do not support removing keys after AP shutdown.
3310			 * Pretend we do to make mac80211 happy.
3311			 */
3312			if (action != KEY_ADD_OR_REPLACE)
3313				return 0;
3314
3315			ret = wl1271_record_ap_key(wl, wlvif, id,
3316					     key_type, key_size,
3317					     key, hlid, tx_seq_32,
3318					     tx_seq_16);
3319		} else {
3320			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3321					     id, key_type, key_size,
3322					     key, hlid, tx_seq_32,
3323					     tx_seq_16);
3324		}
3325
3326		if (ret < 0)
3327			return ret;
3328	} else {
3329		const u8 *addr;
3330		static const u8 bcast_addr[ETH_ALEN] = {
3331			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3332		};
3333
3334		addr = sta ? sta->addr : bcast_addr;
3335
3336		if (is_zero_ether_addr(addr)) {
3337			/* We don't support TX-only encryption */
3338			return -EOPNOTSUPP;
3339		}
3340
3341		/* The wl1271 does not allow removing unicast keys - they
3342		   will be cleared automatically on next CMD_JOIN. Ignore the
3343		   request silently, as we don't want mac80211 to emit
3344		   an error message. */
3345		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3346			return 0;
3347
3348		/* don't remove key if hlid was already deleted */
3349		if (action == KEY_REMOVE &&
3350		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3351			return 0;
3352
3353		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3354					     id, key_type, key_size,
3355					     key, addr, tx_seq_32,
3356					     tx_seq_16);
3357		if (ret < 0)
3358			return ret;
3359
3360	}
3361
3362	return 0;
3363}
3364
3365static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3366			     struct ieee80211_vif *vif,
3367			     struct ieee80211_sta *sta,
3368			     struct ieee80211_key_conf *key_conf)
3369{
3370	struct wl1271 *wl = hw->priv;
3371	int ret;
3372	bool might_change_spare =
3373		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3374		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3375
3376	if (might_change_spare) {
3377		/*
3378		 * stop the queues and flush to ensure the next packets are
3379		 * in sync with FW spare block accounting
3380		 */
3381		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3382		wl1271_tx_flush(wl);
3383	}
3384
3385	mutex_lock(&wl->mutex);
3386
3387	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3388		ret = -EAGAIN;
3389		goto out_wake_queues;
3390	}
3391
3392	ret = wl1271_ps_elp_wakeup(wl);
3393	if (ret < 0)
3394		goto out_wake_queues;
3395
3396	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3397
3398	wl1271_ps_elp_sleep(wl);
3399
3400out_wake_queues:
3401	if (might_change_spare)
3402		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3403
3404	mutex_unlock(&wl->mutex);
3405
3406	return ret;
3407}
3408
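/*
 * Common key handling: translate the mac80211 cipher into a firmware key
 * type, derive the TX sequence counters from the per-link freed-packets
 * counter, and add or remove the key via wl1271_set_key().
 */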
3409int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3410		   struct ieee80211_vif *vif,
3411		   struct ieee80211_sta *sta,
3412		   struct ieee80211_key_conf *key_conf)
3413{
3414	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3415	int ret;
3416	u32 tx_seq_32 = 0;
3417	u16 tx_seq_16 = 0;
3418	u8 key_type;
3419	u8 hlid;
3420
3421	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3422
3423	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3424	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3425		     key_conf->cipher, key_conf->keyidx,
3426		     key_conf->keylen, key_conf->flags);
3427	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3428
3429	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3430		if (sta) {
3431			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3432			hlid = wl_sta->hlid;
3433		} else {
3434			hlid = wlvif->ap.bcast_hlid;
3435		}
3436	else
3437		hlid = wlvif->sta.hlid;
3438
3439	if (hlid != WL12XX_INVALID_LINK_ID) {
3440		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3441		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3442		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3443	}
3444
3445	switch (key_conf->cipher) {
3446	case WLAN_CIPHER_SUITE_WEP40:
3447	case WLAN_CIPHER_SUITE_WEP104:
3448		key_type = KEY_WEP;
3449
3450		key_conf->hw_key_idx = key_conf->keyidx;
3451		break;
3452	case WLAN_CIPHER_SUITE_TKIP:
3453		key_type = KEY_TKIP;
3454		key_conf->hw_key_idx = key_conf->keyidx;
3455		break;
3456	case WLAN_CIPHER_SUITE_CCMP:
3457		key_type = KEY_AES;
3458		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3459		break;
3460	case WL1271_CIPHER_SUITE_GEM:
3461		key_type = KEY_GEM;
3462		break;
3463	default:
3464		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3465
3466		return -EOPNOTSUPP;
3467	}
3468
3469	switch (cmd) {
3470	case SET_KEY:
3471		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3472				 key_conf->keyidx, key_type,
3473				 key_conf->keylen, key_conf->key,
3474				 tx_seq_32, tx_seq_16, sta);
3475		if (ret < 0) {
3476			wl1271_error("Could not add or replace key");
3477			return ret;
3478		}
3479
3480		/*
3481		 * Reconfigure the ARP response if the unicast (or common)
3482		 * encryption key type was changed.
3483		 */
3484		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3485		    (sta || key_type == KEY_WEP) &&
3486		    wlvif->encryption_type != key_type) {
3487			wlvif->encryption_type = key_type;
3488			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3489			if (ret < 0) {
3490				wl1271_warning("build arp rsp failed: %d", ret);
3491				return ret;
3492			}
3493		}
3494		break;
3495
3496	case DISABLE_KEY:
3497		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3498				     key_conf->keyidx, key_type,
3499				     key_conf->keylen, key_conf->key,
3500				     0, 0, sta);
3501		if (ret < 0) {
3502			wl1271_error("Could not remove key");
3503			return ret;
3504		}
3505		break;
3506
3507	default:
3508		wl1271_error("Unsupported key cmd 0x%x", cmd);
3509		return -EOPNOTSUPP;
3510	}
3511
3512	return ret;
3513}
3514EXPORT_SYMBOL_GPL(wlcore_set_key);
3515
3516static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3517					  struct ieee80211_vif *vif,
3518					  int key_idx)
3519{
3520	struct wl1271 *wl = hw->priv;
3521	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3522	int ret;
3523
3524	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3525		     key_idx);
3526
3527	/* we don't handle unsetting of default key */
3528	if (key_idx == -1)
3529		return;
3530
3531	mutex_lock(&wl->mutex);
3532
3533	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3534		ret = -EAGAIN;
3535		goto out_unlock;
3536	}
3537
3538	ret = wl1271_ps_elp_wakeup(wl);
3539	if (ret < 0)
3540		goto out_unlock;
3541
3542	wlvif->default_key = key_idx;
3543
3544	/* the default WEP key needs to be configured at least once */
3545	if (wlvif->encryption_type == KEY_WEP) {
3546		ret = wl12xx_cmd_set_default_wep_key(wl,
3547				key_idx,
3548				wlvif->sta.hlid);
3549		if (ret < 0)
3550			goto out_sleep;
3551	}
3552
3553out_sleep:
3554	wl1271_ps_elp_sleep(wl);
3555
3556out_unlock:
3557	mutex_unlock(&wl->mutex);
3558}
3559
3560void wlcore_regdomain_config(struct wl1271 *wl)
3561{
3562	int ret;
3563
3564	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3565		return;
3566
3567	mutex_lock(&wl->mutex);
3568
3569	if (unlikely(wl->state != WLCORE_STATE_ON))
3570		goto out;
3571
3572	ret = wl1271_ps_elp_wakeup(wl);
3573	if (ret < 0)
3574		goto out;
3575
3576	ret = wlcore_cmd_regdomain_config_locked(wl);
3577	if (ret < 0) {
3578		wl12xx_queue_recovery_work(wl);
3579		goto out;
3580	}
3581
3582	wl1271_ps_elp_sleep(wl);
3583out:
3584	mutex_unlock(&wl->mutex);
3585}
3586
3587static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3588			     struct ieee80211_vif *vif,
3589			     struct ieee80211_scan_request *hw_req)
3590{
3591	struct cfg80211_scan_request *req = &hw_req->req;
3592	struct wl1271 *wl = hw->priv;
3593	int ret;
3594	u8 *ssid = NULL;
3595	size_t len = 0;
3596
3597	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3598
3599	if (req->n_ssids) {
3600		ssid = req->ssids[0].ssid;
3601		len = req->ssids[0].ssid_len;
3602	}
3603
3604	mutex_lock(&wl->mutex);
3605
3606	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3607		/*
3608		 * We cannot return -EBUSY here because cfg80211 will expect
3609		 * a call to ieee80211_scan_completed if we do - in this case
3610		 * there won't be any call.
3611		 */
3612		ret = -EAGAIN;
3613		goto out;
3614	}
3615
3616	ret = wl1271_ps_elp_wakeup(wl);
3617	if (ret < 0)
3618		goto out;
3619
3620	/* fail if there is any role in ROC */
3621	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3622		/* don't allow scanning right now */
3623		ret = -EBUSY;
3624		goto out_sleep;
3625	}
3626
3627	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3628out_sleep:
3629	wl1271_ps_elp_sleep(wl);
3630out:
3631	mutex_unlock(&wl->mutex);
3632
3633	return ret;
3634}
3635
3636static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3637				     struct ieee80211_vif *vif)
3638{
3639	struct wl1271 *wl = hw->priv;
3640	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3641	int ret;
3642
3643	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3644
3645	mutex_lock(&wl->mutex);
3646
3647	if (unlikely(wl->state != WLCORE_STATE_ON))
3648		goto out;
3649
3650	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3651		goto out;
3652
3653	ret = wl1271_ps_elp_wakeup(wl);
3654	if (ret < 0)
3655		goto out;
3656
3657	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3658		ret = wl->ops->scan_stop(wl, wlvif);
3659		if (ret < 0)
3660			goto out_sleep;
3661	}
3662
3663	/*
3664	 * Rearm the tx watchdog just before idling scan. This
3665	 * prevents just-finished scans from triggering the watchdog
3666	 */
3667	wl12xx_rearm_tx_watchdog_locked(wl);
3668
3669	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3670	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3671	wl->scan_wlvif = NULL;
3672	wl->scan.req = NULL;
3673	ieee80211_scan_completed(wl->hw, true);
3674
3675out_sleep:
3676	wl1271_ps_elp_sleep(wl);
3677out:
3678	mutex_unlock(&wl->mutex);
3679
3680	cancel_delayed_work_sync(&wl->scan_complete_work);
3681}
3682
3683static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3684				      struct ieee80211_vif *vif,
3685				      struct cfg80211_sched_scan_request *req,
3686				      struct ieee80211_scan_ies *ies)
3687{
3688	struct wl1271 *wl = hw->priv;
3689	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3690	int ret;
3691
3692	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3693
3694	mutex_lock(&wl->mutex);
3695
3696	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3697		ret = -EAGAIN;
3698		goto out;
3699	}
3700
3701	ret = wl1271_ps_elp_wakeup(wl);
3702	if (ret < 0)
3703		goto out;
3704
3705	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3706	if (ret < 0)
3707		goto out_sleep;
3708
3709	wl->sched_vif = wlvif;
3710
3711out_sleep:
3712	wl1271_ps_elp_sleep(wl);
3713out:
3714	mutex_unlock(&wl->mutex);
3715	return ret;
3716}
3717
3718static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3719				     struct ieee80211_vif *vif)
3720{
3721	struct wl1271 *wl = hw->priv;
3722	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3723	int ret;
3724
3725	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3726
3727	mutex_lock(&wl->mutex);
3728
3729	if (unlikely(wl->state != WLCORE_STATE_ON))
3730		goto out;
3731
3732	ret = wl1271_ps_elp_wakeup(wl);
3733	if (ret < 0)
3734		goto out;
3735
3736	wl->ops->sched_scan_stop(wl, wlvif);
3737
3738	wl1271_ps_elp_sleep(wl);
3739out:
3740	mutex_unlock(&wl->mutex);
3741
3742	return 0;
3743}
3744
3745static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3746{
3747	struct wl1271 *wl = hw->priv;
3748	int ret = 0;
3749
3750	mutex_lock(&wl->mutex);
3751
3752	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3753		ret = -EAGAIN;
3754		goto out;
3755	}
3756
3757	ret = wl1271_ps_elp_wakeup(wl);
3758	if (ret < 0)
3759		goto out;
3760
3761	ret = wl1271_acx_frag_threshold(wl, value);
3762	if (ret < 0)
3763		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3764
3765	wl1271_ps_elp_sleep(wl);
3766
3767out:
3768	mutex_unlock(&wl->mutex);
3769
3770	return ret;
3771}
3772
3773static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3774{
3775	struct wl1271 *wl = hw->priv;
3776	struct wl12xx_vif *wlvif;
3777	int ret = 0;
3778
3779	mutex_lock(&wl->mutex);
3780
3781	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3782		ret = -EAGAIN;
3783		goto out;
3784	}
3785
3786	ret = wl1271_ps_elp_wakeup(wl);
3787	if (ret < 0)
3788		goto out;
3789
3790	wl12xx_for_each_wlvif(wl, wlvif) {
3791		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3792		if (ret < 0)
3793			wl1271_warning("set rts threshold failed: %d", ret);
3794	}
3795	wl1271_ps_elp_sleep(wl);
3796
3797out:
3798	mutex_unlock(&wl->mutex);
3799
3800	return ret;
3801}
3802
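/*
 * The two helpers below strip a single IE (plain or vendor-specific) from
 * an skb by moving the remaining tail over it and trimming the skb length.
 */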
3803static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3804{
3805	int len;
3806	const u8 *next, *end = skb->data + skb->len;
3807	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3808					skb->len - ieoffset);
3809	if (!ie)
3810		return;
3811	len = ie[1] + 2;
3812	next = ie + len;
3813	memmove(ie, next, end - next);
3814	skb_trim(skb, skb->len - len);
3815}
3816
3817static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3818					    unsigned int oui, u8 oui_type,
3819					    int ieoffset)
3820{
3821	int len;
3822	const u8 *next, *end = skb->data + skb->len;
3823	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3824					       skb->data + ieoffset,
3825					       skb->len - ieoffset);
3826	if (!ie)
3827		return;
3828	len = ie[1] + 2;
3829	next = ie + len;
3830	memmove(ie, next, end - next);
3831	skb_trim(skb, skb->len - len);
3832}
3833
3834static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3835					 struct ieee80211_vif *vif)
3836{
3837	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3838	struct sk_buff *skb;
3839	int ret;
3840
3841	skb = ieee80211_proberesp_get(wl->hw, vif);
3842	if (!skb)
3843		return -EOPNOTSUPP;
3844
3845	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3846				      CMD_TEMPL_AP_PROBE_RESPONSE,
3847				      skb->data,
3848				      skb->len, 0,
3849				      rates);
3850	dev_kfree_skb(skb);
3851
3852	if (ret < 0)
3853		goto out;
3854
3855	wl1271_debug(DEBUG_AP, "probe response updated");
3856	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3857
3858out:
3859	return ret;
3860}
3861
3862static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3863					     struct ieee80211_vif *vif,
3864					     u8 *probe_rsp_data,
3865					     size_t probe_rsp_len,
3866					     u32 rates)
3867{
3868	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3869	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3870	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3871	int ssid_ie_offset, ie_offset, templ_len;
3872	const u8 *ptr;
3873
3874	/* no need to change probe response if the SSID is set correctly */
3875	if (wlvif->ssid_len > 0)
3876		return wl1271_cmd_template_set(wl, wlvif->role_id,
3877					       CMD_TEMPL_AP_PROBE_RESPONSE,
3878					       probe_rsp_data,
3879					       probe_rsp_len, 0,
3880					       rates);
3881
3882	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3883		wl1271_error("probe_rsp template too big");
3884		return -EINVAL;
3885	}
3886
3887	/* start searching from IE offset */
3888	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3889
3890	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3891			       probe_rsp_len - ie_offset);
3892	if (!ptr) {
3893		wl1271_error("No SSID in beacon!");
3894		return -EINVAL;
3895	}
3896
3897	ssid_ie_offset = ptr - probe_rsp_data;
3898	ptr += (ptr[1] + 2);
3899
3900	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3901
3902	/* insert SSID from bss_conf */
3903	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3904	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3905	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3906	       bss_conf->ssid, bss_conf->ssid_len);
3907	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3908
3909	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3910	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3911	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3912
3913	return wl1271_cmd_template_set(wl, wlvif->role_id,
3914				       CMD_TEMPL_AP_PROBE_RESPONSE,
3915				       probe_rsp_templ,
3916				       templ_len, 0,
3917				       rates);
3918}
3919
3920static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3921				       struct ieee80211_vif *vif,
3922				       struct ieee80211_bss_conf *bss_conf,
3923				       u32 changed)
3924{
3925	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3926	int ret = 0;
3927
3928	if (changed & BSS_CHANGED_ERP_SLOT) {
3929		if (bss_conf->use_short_slot)
3930			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3931		else
3932			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3933		if (ret < 0) {
3934			wl1271_warning("Set slot time failed %d", ret);
3935			goto out;
3936		}
3937	}
3938
3939	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3940		if (bss_conf->use_short_preamble)
3941			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3942		else
3943			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3944	}
3945
3946	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3947		if (bss_conf->use_cts_prot)
3948			ret = wl1271_acx_cts_protect(wl, wlvif,
3949						     CTSPROTECT_ENABLE);
3950		else
3951			ret = wl1271_acx_cts_protect(wl, wlvif,
3952						     CTSPROTECT_DISABLE);
3953		if (ret < 0) {
3954			wl1271_warning("Set ctsprotect failed %d", ret);
3955			goto out;
3956		}
3957	}
3958
3959out:
3960	return ret;
3961}
3962
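/*
 * Upload the beacon template to the firmware and, unless userspace already
 * set a probe-response template explicitly, derive one from the beacon by
 * stripping the TIM and P2P IEs and rewriting the frame control field.
 */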
3963static int wlcore_set_beacon_template(struct wl1271 *wl,
3964				      struct ieee80211_vif *vif,
3965				      bool is_ap)
3966{
3967	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3968	struct ieee80211_hdr *hdr;
3969	u32 min_rate;
3970	int ret;
3971	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3972	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3973	u16 tmpl_id;
3974
3975	if (!beacon) {
3976		ret = -EINVAL;
3977		goto out;
3978	}
3979
3980	wl1271_debug(DEBUG_MASTER, "beacon updated");
3981
3982	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3983	if (ret < 0) {
3984		dev_kfree_skb(beacon);
3985		goto out;
3986	}
3987	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3988	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3989		CMD_TEMPL_BEACON;
3990	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3991				      beacon->data,
3992				      beacon->len, 0,
3993				      min_rate);
3994	if (ret < 0) {
3995		dev_kfree_skb(beacon);
3996		goto out;
3997	}
3998
3999	wlvif->wmm_enabled =
4000		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4001					WLAN_OUI_TYPE_MICROSOFT_WMM,
4002					beacon->data + ieoffset,
4003					beacon->len - ieoffset);
4004
4005	/*
4006	 * In case userspace has already set a probe response template
4007	 * explicitly, don't derive one from the beacon data.
4008	 */
4009	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4010		goto end_bcn;
4011
4012	/* remove TIM ie from probe response */
4013	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4014
4015	/*
4016	 * remove the p2p ie from the probe response.
4017	 * the fw responds to probe requests that don't include
4018	 * the p2p ie. probe requests with a p2p ie will be passed up,
4019	 * and will be answered by the supplicant (the spec
4020	 * forbids including the p2p ie when responding to probe
4021	 * requests that didn't include it).
4022	 */
4023	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4024				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4025
4026	hdr = (struct ieee80211_hdr *) beacon->data;
4027	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4028					 IEEE80211_STYPE_PROBE_RESP);
4029	if (is_ap)
4030		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4031							   beacon->data,
4032							   beacon->len,
4033							   min_rate);
4034	else
4035		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4036					      CMD_TEMPL_PROBE_RESPONSE,
4037					      beacon->data,
4038					      beacon->len, 0,
4039					      min_rate);
4040end_bcn:
4041	dev_kfree_skb(beacon);
4044
4045out:
4046	return ret;
4047}
4048
4049static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4050					  struct ieee80211_vif *vif,
4051					  struct ieee80211_bss_conf *bss_conf,
4052					  u32 changed)
4053{
4054	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4055	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4056	int ret = 0;
4057
4058	if (changed & BSS_CHANGED_BEACON_INT) {
4059		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4060			bss_conf->beacon_int);
4061
4062		wlvif->beacon_int = bss_conf->beacon_int;
4063	}
4064
4065	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4066		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4067
4068		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4069	}
4070
4071	if (changed & BSS_CHANGED_BEACON) {
4072		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4073		if (ret < 0)
4074			goto out;
4075	}
4076
4077out:
4078	if (ret != 0)
4079		wl1271_error("beacon info change failed: %d", ret);
4080	return ret;
4081}
4082
4083/* AP mode changes */
4084static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4085				       struct ieee80211_vif *vif,
4086				       struct ieee80211_bss_conf *bss_conf,
4087				       u32 changed)
4088{
4089	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4090	int ret = 0;
4091
4092	if (changed & BSS_CHANGED_BASIC_RATES) {
4093		u32 rates = bss_conf->basic_rates;
4094
4095		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4096								 wlvif->band);
4097		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4098							wlvif->basic_rate_set);
4099
4100		ret = wl1271_init_ap_rates(wl, wlvif);
4101		if (ret < 0) {
4102			wl1271_error("AP rate policy change failed %d", ret);
4103			goto out;
4104		}
4105
4106		ret = wl1271_ap_init_templates(wl, vif);
4107		if (ret < 0)
4108			goto out;
4109
4110		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4111		if (ret < 0)
4112			goto out;
4113
4114		ret = wlcore_set_beacon_template(wl, vif, true);
4115		if (ret < 0)
4116			goto out;
4117	}
4118
4119	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4120	if (ret < 0)
4121		goto out;
4122
4123	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4124		if (bss_conf->enable_beacon) {
4125			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4126				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4127				if (ret < 0)
4128					goto out;
4129
4130				ret = wl1271_ap_init_hwenc(wl, wlvif);
4131				if (ret < 0)
4132					goto out;
4133
4134				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4135				wl1271_debug(DEBUG_AP, "started AP");
4136			}
4137		} else {
4138			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4139				/*
4140				 * AP might be in ROC in case we have just
4141				 * sent auth reply. handle it.
4142				 */
4143				if (test_bit(wlvif->role_id, wl->roc_map))
4144					wl12xx_croc(wl, wlvif->role_id);
4145
4146				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4147				if (ret < 0)
4148					goto out;
4149
4150				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4151				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4152					  &wlvif->flags);
4153				wl1271_debug(DEBUG_AP, "stopped AP");
4154			}
4155		}
4156	}
4157
4158	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4159	if (ret < 0)
4160		goto out;
4161
4162	/* Handle HT information change */
4163	if ((changed & BSS_CHANGED_HT) &&
4164	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4165		ret = wl1271_acx_set_ht_information(wl, wlvif,
4166					bss_conf->ht_operation_mode);
4167		if (ret < 0) {
4168			wl1271_warning("Set ht information failed %d", ret);
4169			goto out;
4170		}
4171	}
4172
4173out:
4174	return;
4175}
4176
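/*
 * Configure the firmware for a new BSSID: update the rate policies and
 * the null-data templates, stop any pending scheduled scan and mark the
 * role as in use.
 */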
4177static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4178			    struct ieee80211_bss_conf *bss_conf,
4179			    u32 sta_rate_set)
4180{
4181	u32 rates;
4182	int ret;
4183
4184	wl1271_debug(DEBUG_MAC80211,
4185	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4186	     bss_conf->bssid, bss_conf->aid,
4187	     bss_conf->beacon_int,
4188	     bss_conf->basic_rates, sta_rate_set);
4189
4190	wlvif->beacon_int = bss_conf->beacon_int;
4191	rates = bss_conf->basic_rates;
4192	wlvif->basic_rate_set =
4193		wl1271_tx_enabled_rates_get(wl, rates,
4194					    wlvif->band);
4195	wlvif->basic_rate =
4196		wl1271_tx_min_rate_get(wl,
4197				       wlvif->basic_rate_set);
4198
4199	if (sta_rate_set)
4200		wlvif->rate_set =
4201			wl1271_tx_enabled_rates_get(wl,
4202						sta_rate_set,
4203						wlvif->band);
4204
4205	/* we only support sched_scan while not connected */
4206	if (wl->sched_vif == wlvif)
4207		wl->ops->sched_scan_stop(wl, wlvif);
4208
4209	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4210	if (ret < 0)
4211		return ret;
4212
4213	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4214	if (ret < 0)
4215		return ret;
4216
4217	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4218	if (ret < 0)
4219		return ret;
4220
4221	wlcore_set_ssid(wl, wlvif);
4222
4223	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4224
4225	return 0;
4226}
4227
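/*
 * Undo wlcore_set_bssid(): fall back to the minimum rates for the
 * current band, stop the STA role if needed and clear the in-use flag.
 */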
4228static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4229{
4230	int ret;
4231
4232	/* revert to the minimum rates for the current band */
4233	wl1271_set_band_rate(wl, wlvif);
4234	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4235
4236	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4237	if (ret < 0)
4238		return ret;
4239
4240	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4241	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4242		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4243		if (ret < 0)
4244			return ret;
4245	}
4246
4247	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4248	return 0;
4249}
4250/* STA/IBSS mode changes */
4251static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4252					struct ieee80211_vif *vif,
4253					struct ieee80211_bss_conf *bss_conf,
4254					u32 changed)
4255{
4256	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4257	bool do_join = false;
4258	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4259	bool ibss_joined = false;
4260	u32 sta_rate_set = 0;
4261	int ret;
4262	struct ieee80211_sta *sta;
4263	bool sta_exists = false;
4264	struct ieee80211_sta_ht_cap sta_ht_cap;
4265
4266	if (is_ibss) {
4267		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4268						     changed);
4269		if (ret < 0)
4270			goto out;
4271	}
4272
4273	if (changed & BSS_CHANGED_IBSS) {
4274		if (bss_conf->ibss_joined) {
4275			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4276			ibss_joined = true;
4277		} else {
4278			wlcore_unset_assoc(wl, wlvif);
4279			wl12xx_cmd_role_stop_sta(wl, wlvif);
4280		}
4281	}
4282
4283	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4284		do_join = true;
4285
4286	/* Need to update the SSID (for filtering etc) */
4287	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4288		do_join = true;
4289
4290	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4291		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4292			     bss_conf->enable_beacon ? "enabled" : "disabled");
4293
4294		do_join = true;
4295	}
4296
4297	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4298		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4299
4300	if (changed & BSS_CHANGED_CQM) {
4301		bool enable = false;
4302		if (bss_conf->cqm_rssi_thold)
4303			enable = true;
4304		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4305						  bss_conf->cqm_rssi_thold,
4306						  bss_conf->cqm_rssi_hyst);
4307		if (ret < 0)
4308			goto out;
4309		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4310	}
4311
4312	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4313		       BSS_CHANGED_ASSOC)) {
4314		rcu_read_lock();
4315		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4316		if (sta) {
4317			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4318
4319			/* save the supp_rates of the ap */
4320			sta_rate_set = sta->supp_rates[wlvif->band];
4321			if (sta->ht_cap.ht_supported)
4322				sta_rate_set |=
4323					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4324					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4325			sta_ht_cap = sta->ht_cap;
4326			sta_exists = true;
4327		}
4328
4329		rcu_read_unlock();
4330	}
4331
4332	if (changed & BSS_CHANGED_BSSID) {
4333		if (!is_zero_ether_addr(bss_conf->bssid)) {
4334			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4335					       sta_rate_set);
4336			if (ret < 0)
4337				goto out;
4338
4339			/* Need to update the BSSID (for filtering etc) */
4340			do_join = true;
4341		} else {
4342			ret = wlcore_clear_bssid(wl, wlvif);
4343			if (ret < 0)
4344				goto out;
4345		}
4346	}
4347
4348	if (changed & BSS_CHANGED_IBSS) {
4349		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4350			     bss_conf->ibss_joined);
4351
4352		if (bss_conf->ibss_joined) {
4353			u32 rates = bss_conf->basic_rates;
4354			wlvif->basic_rate_set =
4355				wl1271_tx_enabled_rates_get(wl, rates,
4356							    wlvif->band);
4357			wlvif->basic_rate =
4358				wl1271_tx_min_rate_get(wl,
4359						       wlvif->basic_rate_set);
4360
4361			/* by default, use 11b + OFDM rates */
4362			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4363			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4364			if (ret < 0)
4365				goto out;
4366		}
4367	}
4368
4369	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4370		/* enable beacon filtering */
4371		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4372		if (ret < 0)
4373			goto out;
4374	}
4375
4376	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4377	if (ret < 0)
4378		goto out;
4379
4380	if (do_join) {
4381		ret = wlcore_join(wl, wlvif);
4382		if (ret < 0) {
4383			wl1271_warning("cmd join failed %d", ret);
4384			goto out;
4385		}
4386	}
4387
4388	if (changed & BSS_CHANGED_ASSOC) {
4389		if (bss_conf->assoc) {
4390			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4391					       sta_rate_set);
4392			if (ret < 0)
4393				goto out;
4394
4395			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4396				wl12xx_set_authorized(wl, wlvif);
4397		} else {
4398			wlcore_unset_assoc(wl, wlvif);
4399		}
4400	}
4401
4402	if (changed & BSS_CHANGED_PS) {
4403		if ((bss_conf->ps) &&
4404		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4405		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4406			int ps_mode;
4407			char *ps_mode_str;
4408
4409			if (wl->conf.conn.forced_ps) {
4410				ps_mode = STATION_POWER_SAVE_MODE;
4411				ps_mode_str = "forced";
4412			} else {
4413				ps_mode = STATION_AUTO_PS_MODE;
4414				ps_mode_str = "auto";
4415			}
4416
4417			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4418
4419			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4420			if (ret < 0)
4421				wl1271_warning("enter %s ps failed %d",
4422					       ps_mode_str, ret);
4423		} else if (!bss_conf->ps &&
4424			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4425			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4426
4427			ret = wl1271_ps_set_mode(wl, wlvif,
4428						 STATION_ACTIVE_MODE);
4429			if (ret < 0)
4430				wl1271_warning("exit auto ps failed %d", ret);
4431		}
4432	}
4433
4434	/* Handle new association with HT. Do this after join. */
4435	if (sta_exists) {
4436		bool enabled =
4437			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4438
4439		ret = wlcore_hw_set_peer_cap(wl,
4440					     &sta_ht_cap,
4441					     enabled,
4442					     wlvif->rate_set,
4443					     wlvif->sta.hlid);
4444		if (ret < 0) {
4445			wl1271_warning("Set ht cap failed %d", ret);
4446			goto out;
4447
4448		}
4449
4450		if (enabled) {
4451			ret = wl1271_acx_set_ht_information(wl, wlvif,
4452						bss_conf->ht_operation_mode);
4453			if (ret < 0) {
4454				wl1271_warning("Set ht information failed %d",
4455					       ret);
4456				goto out;
4457			}
4458		}
4459	}
4460
4461	/* Handle ARP filtering. Done after join. */
4462	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4463	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4464		__be32 addr = bss_conf->arp_addr_list[0];
4465		wlvif->sta.qos = bss_conf->qos;
4466		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4467
4468		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4469			wlvif->ip_addr = addr;
4470			/*
4471			 * The template should have been configured only upon
4472			 * association. However, it seems that the correct IP
4473			 * isn't being set (when sending), so we have to
4474			 * reconfigure the template upon every IP change.
4475			 */
4476			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4477			if (ret < 0) {
4478				wl1271_warning("build arp rsp failed: %d", ret);
4479				goto out;
4480			}
4481
4482			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4483				(ACX_ARP_FILTER_ARP_FILTERING |
4484				 ACX_ARP_FILTER_AUTO_ARP),
4485				addr);
4486		} else {
4487			wlvif->ip_addr = 0;
4488			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4489		}
4490
4491		if (ret < 0)
4492			goto out;
4493	}
4494
4495out:
4496	return;
4497}
4498
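/*
 * mac80211 bss_info_changed handler: dispatches the change bitmap to
 * the AP or STA/IBSS specific handlers while holding wl->mutex with
 * the chip awake.
 */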
4499static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4500				       struct ieee80211_vif *vif,
4501				       struct ieee80211_bss_conf *bss_conf,
4502				       u32 changed)
4503{
4504	struct wl1271 *wl = hw->priv;
4505	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4506	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4507	int ret;
4508
4509	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4510		     wlvif->role_id, (int)changed);
4511
4512	/*
4513	 * make sure to cancel pending disconnections if our association
4514	 * state changed
4515	 */
4516	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4517		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4518
4519	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4520	    !bss_conf->enable_beacon)
4521		wl1271_tx_flush(wl);
4522
4523	mutex_lock(&wl->mutex);
4524
4525	if (unlikely(wl->state != WLCORE_STATE_ON))
4526		goto out;
4527
4528	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4529		goto out;
4530
4531	ret = wl1271_ps_elp_wakeup(wl);
4532	if (ret < 0)
4533		goto out;
4534
4535	if ((changed & BSS_CHANGED_TXPOWER) &&
4536	    bss_conf->txpower != wlvif->power_level) {
4537
4538		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4539		if (ret < 0)
4540			goto out;
4541
4542		wlvif->power_level = bss_conf->txpower;
4543	}
4544
4545	if (is_ap)
4546		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4547	else
4548		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4549
4550	wl1271_ps_elp_sleep(wl);
4551
4552out:
4553	mutex_unlock(&wl->mutex);
4554}
4555
4556static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4557				 struct ieee80211_chanctx_conf *ctx)
4558{
4559	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4560		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4561		     cfg80211_get_chandef_type(&ctx->def));
4562	return 0;
4563}
4564
4565static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4566				     struct ieee80211_chanctx_conf *ctx)
4567{
4568	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4569		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4570		     cfg80211_get_chandef_type(&ctx->def));
4571}
4572
4573static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4574				     struct ieee80211_chanctx_conf *ctx,
4575				     u32 changed)
4576{
4577	wl1271_debug(DEBUG_MAC80211,
4578		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4579		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4580		     cfg80211_get_chandef_type(&ctx->def), changed);
4581}
4582
4583static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4584					struct ieee80211_vif *vif,
4585					struct ieee80211_chanctx_conf *ctx)
4586{
4587	struct wl1271 *wl = hw->priv;
4588	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4589	int channel = ieee80211_frequency_to_channel(
4590		ctx->def.chan->center_freq);
4591
4592	wl1271_debug(DEBUG_MAC80211,
4593		     "mac80211 assign chanctx (role %d) %d (type %d)",
4594		     wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4595
4596	mutex_lock(&wl->mutex);
4597
4598	wlvif->band = ctx->def.chan->band;
4599	wlvif->channel = channel;
4600	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4601
4602	/* update default rates according to the band */
4603	wl1271_set_band_rate(wl, wlvif);
4604
4605	mutex_unlock(&wl->mutex);
4606
4607	return 0;
4608}
4609
4610static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4611					   struct ieee80211_vif *vif,
4612					   struct ieee80211_chanctx_conf *ctx)
4613{
4614	struct wl1271 *wl = hw->priv;
4615	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4616
4617	wl1271_debug(DEBUG_MAC80211,
4618		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4619		     wlvif->role_id,
4620		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4621		     cfg80211_get_chandef_type(&ctx->def));
4622
4623	wl1271_tx_flush(wl);
4624}
4625
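/*
 * mac80211 conf_tx handler: program the EDCA parameters (cw_min/max,
 * AIFS, TXOP) and the TID configuration for the given queue.
 */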
4626static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4627			     struct ieee80211_vif *vif, u16 queue,
4628			     const struct ieee80211_tx_queue_params *params)
4629{
4630	struct wl1271 *wl = hw->priv;
4631	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4632	u8 ps_scheme;
4633	int ret = 0;
4634
4635	mutex_lock(&wl->mutex);
4636
4637	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4638
4639	if (params->uapsd)
4640		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4641	else
4642		ps_scheme = CONF_PS_SCHEME_LEGACY;
4643
4644	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4645		goto out;
4646
4647	ret = wl1271_ps_elp_wakeup(wl);
4648	if (ret < 0)
4649		goto out;
4650
4651	/*
4652	 * mac80211 passes the txop in units of 32us, but the
4653	 * firmware expects microseconds, hence the << 5 below.
4654	 */
4655	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4656				params->cw_min, params->cw_max,
4657				params->aifs, params->txop << 5);
4658	if (ret < 0)
4659		goto out_sleep;
4660
4661	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4662				 CONF_CHANNEL_TYPE_EDCF,
4663				 wl1271_tx_get_queue(queue),
4664				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4665				 0, 0);
4666
4667out_sleep:
4668	wl1271_ps_elp_sleep(wl);
4669
4670out:
4671	mutex_unlock(&wl->mutex);
4672
4673	return ret;
4674}
4675
4676static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4677			     struct ieee80211_vif *vif)
4678{
4679
4680	struct wl1271 *wl = hw->priv;
4681	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4682	u64 mactime = ULLONG_MAX;
4683	int ret;
4684
4685	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4686
4687	mutex_lock(&wl->mutex);
4688
4689	if (unlikely(wl->state != WLCORE_STATE_ON))
4690		goto out;
4691
4692	ret = wl1271_ps_elp_wakeup(wl);
4693	if (ret < 0)
4694		goto out;
4695
4696	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4697	if (ret < 0)
4698		goto out_sleep;
4699
4700out_sleep:
4701	wl1271_ps_elp_sleep(wl);
4702
4703out:
4704	mutex_unlock(&wl->mutex);
4705	return mactime;
4706}
4707
4708static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4709				struct survey_info *survey)
4710{
4711	struct ieee80211_conf *conf = &hw->conf;
4712
4713	if (idx != 0)
4714		return -ENOENT;
4715
4716	survey->channel = conf->chandef.chan;
4717	survey->filled = 0;
4718	return 0;
4719}
4720
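/*
 * Allocate an HLID (host link ID) for a new station in AP mode and
 * restore its saved TX security sequence counter, if any.
 */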
4721static int wl1271_allocate_sta(struct wl1271 *wl,
4722			     struct wl12xx_vif *wlvif,
4723			     struct ieee80211_sta *sta)
4724{
4725	struct wl1271_station *wl_sta;
4726	int ret;
4727
4729	if (wl->active_sta_count >= wl->max_ap_stations) {
4730		wl1271_warning("could not allocate HLID - too many stations");
4731		return -EBUSY;
4732	}
4733
4734	wl_sta = (struct wl1271_station *)sta->drv_priv;
4735	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4736	if (ret < 0) {
4737		wl1271_warning("could not allocate HLID - too many links");
4738		return -EBUSY;
4739	}
4740
4741	/* use the previous security seq, if this is a recovery/resume */
4742	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4743
4744	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4745	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4746	wl->active_sta_count++;
4747	return 0;
4748}
4749
4750void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4751{
4752	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4753		return;
4754
4755	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4756	__clear_bit(hlid, &wl->ap_ps_map);
4757	__clear_bit(hlid, &wl->ap_fw_ps_map);
4758
4759	/*
4760	 * save the last used PN in the private part of ieee80211_sta,
4761	 * in case of recovery/suspend
4762	 */
4763	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4764
4765	wl12xx_free_link(wl, wlvif, &hlid);
4766	wl->active_sta_count--;
4767
4768	/*
4769	 * rearm the tx watchdog when the last STA is freed - give the FW a
4770	 * chance to return STA-buffered packets before complaining.
4771	 */
4772	if (wl->active_sta_count == 0)
4773		wl12xx_rearm_tx_watchdog_locked(wl);
4774}
4775
4776static int wl12xx_sta_add(struct wl1271 *wl,
4777			  struct wl12xx_vif *wlvif,
4778			  struct ieee80211_sta *sta)
4779{
4780	struct wl1271_station *wl_sta;
4781	int ret = 0;
4782	u8 hlid;
4783
4784	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4785
4786	ret = wl1271_allocate_sta(wl, wlvif, sta);
4787	if (ret < 0)
4788		return ret;
4789
4790	wl_sta = (struct wl1271_station *)sta->drv_priv;
4791	hlid = wl_sta->hlid;
4792
4793	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4794	if (ret < 0)
4795		wl1271_free_sta(wl, wlvif, hlid);
4796
4797	return ret;
4798}
4799
4800static int wl12xx_sta_remove(struct wl1271 *wl,
4801			     struct wl12xx_vif *wlvif,
4802			     struct ieee80211_sta *sta)
4803{
4804	struct wl1271_station *wl_sta;
4805	int ret = 0, id;
4806
4807	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4808
4809	wl_sta = (struct wl1271_station *)sta->drv_priv;
4810	id = wl_sta->hlid;
4811	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4812		return -EINVAL;
4813
4814	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4815	if (ret < 0)
4816		return ret;
4817
4818	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4819	return ret;
4820}
4821
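/* Start a ROC on the vif's role, unless some role is already on-channel. */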
4822static void wlcore_roc_if_possible(struct wl1271 *wl,
4823				   struct wl12xx_vif *wlvif)
4824{
4825	if (find_first_bit(wl->roc_map,
4826			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4827		return;
4828
4829	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4830		return;
4831
4832	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4833}
4834
4835/*
4836 * when wl_sta is NULL, we treat this call as if coming from a
4837 * pending auth reply.
4838 * wl->mutex must be taken and the FW must be awake when the call
4839 * takes place.
4840 */
4841void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4842			      struct wl1271_station *wl_sta, bool in_conn)
4843{
4844	if (in_conn) {
4845		if (WARN_ON(wl_sta && wl_sta->in_connection))
4846			return;
4847
4848		if (!wlvif->ap_pending_auth_reply &&
4849		    !wlvif->inconn_count)
4850			wlcore_roc_if_possible(wl, wlvif);
4851
4852		if (wl_sta) {
4853			wl_sta->in_connection = true;
4854			wlvif->inconn_count++;
4855		} else {
4856			wlvif->ap_pending_auth_reply = true;
4857		}
4858	} else {
4859		if (wl_sta && !wl_sta->in_connection)
4860			return;
4861
4862		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4863			return;
4864
4865		if (WARN_ON(wl_sta && !wlvif->inconn_count))
4866			return;
4867
4868		if (wl_sta) {
4869			wl_sta->in_connection = false;
4870			wlvif->inconn_count--;
4871		} else {
4872			wlvif->ap_pending_auth_reply = false;
4873		}
4874
4875		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4876		    test_bit(wlvif->role_id, wl->roc_map))
4877			wl12xx_croc(wl, wlvif->role_id);
4878	}
4879}
4880
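/*
 * Handle mac80211 station state transitions: add/remove/authorize peers
 * in AP mode, track authorization and saved sequence numbers in STA
 * mode, and start/stop ROC around connection establishment.
 */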
4881static int wl12xx_update_sta_state(struct wl1271 *wl,
4882				   struct wl12xx_vif *wlvif,
4883				   struct ieee80211_sta *sta,
4884				   enum ieee80211_sta_state old_state,
4885				   enum ieee80211_sta_state new_state)
4886{
4887	struct wl1271_station *wl_sta;
4888	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4889	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4890	int ret;
4891
4892	wl_sta = (struct wl1271_station *)sta->drv_priv;
4893
4894	/* Add station (AP mode) */
4895	if (is_ap &&
4896	    old_state == IEEE80211_STA_NOTEXIST &&
4897	    new_state == IEEE80211_STA_NONE) {
4898		ret = wl12xx_sta_add(wl, wlvif, sta);
4899		if (ret)
4900			return ret;
4901
4902		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4903	}
4904
4905	/* Remove station (AP mode) */
4906	if (is_ap &&
4907	    old_state == IEEE80211_STA_NONE &&
4908	    new_state == IEEE80211_STA_NOTEXIST) {
4909		/* must not fail */
4910		wl12xx_sta_remove(wl, wlvif, sta);
4911
4912		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4913	}
4914
4915	/* Authorize station (AP mode) */
4916	if (is_ap &&
4917	    new_state == IEEE80211_STA_AUTHORIZED) {
4918		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4919		if (ret < 0)
4920			return ret;
4921
4922		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4923						     wl_sta->hlid);
4924		if (ret)
4925			return ret;
4926
4927		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4928	}
4929
4930	/* Authorize station */
4931	if (is_sta &&
4932	    new_state == IEEE80211_STA_AUTHORIZED) {
4933		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4934		ret = wl12xx_set_authorized(wl, wlvif);
4935		if (ret)
4936			return ret;
4937	}
4938
4939	if (is_sta &&
4940	    old_state == IEEE80211_STA_AUTHORIZED &&
4941	    new_state == IEEE80211_STA_ASSOC) {
4942		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4943		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4944	}
4945
4946	/* save seq number on disassoc (suspend) */
4947	if (is_sta &&
4948	    old_state == IEEE80211_STA_ASSOC &&
4949	    new_state == IEEE80211_STA_AUTH) {
4950		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
4951		wlvif->total_freed_pkts = 0;
4952	}
4953
4954	/* restore seq number on assoc (resume) */
4955	if (is_sta &&
4956	    old_state == IEEE80211_STA_AUTH &&
4957	    new_state == IEEE80211_STA_ASSOC) {
4958		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
4959	}
4960
4961	/* clear ROCs on failure or authorization */
4962	if (is_sta &&
4963	    (new_state == IEEE80211_STA_AUTHORIZED ||
4964	     new_state == IEEE80211_STA_NOTEXIST)) {
4965		if (test_bit(wlvif->role_id, wl->roc_map))
4966			wl12xx_croc(wl, wlvif->role_id);
4967	}
4968
4969	if (is_sta &&
4970	    old_state == IEEE80211_STA_NOTEXIST &&
4971	    new_state == IEEE80211_STA_NONE) {
4972		if (find_first_bit(wl->roc_map,
4973				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4974			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4975			wl12xx_roc(wl, wlvif, wlvif->role_id,
4976				   wlvif->band, wlvif->channel);
4977		}
4978	}
4979	return 0;
4980}
4981
4982static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4983			       struct ieee80211_vif *vif,
4984			       struct ieee80211_sta *sta,
4985			       enum ieee80211_sta_state old_state,
4986			       enum ieee80211_sta_state new_state)
4987{
4988	struct wl1271 *wl = hw->priv;
4989	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4990	int ret;
4991
4992	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4993		     sta->aid, old_state, new_state);
4994
4995	mutex_lock(&wl->mutex);
4996
4997	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4998		ret = -EBUSY;
4999		goto out;
5000	}
5001
5002	ret = wl1271_ps_elp_wakeup(wl);
5003	if (ret < 0)
5004		goto out;
5005
5006	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5007
5008	wl1271_ps_elp_sleep(wl);
5009out:
5010	mutex_unlock(&wl->mutex);
5011	if (new_state < old_state)
5012		return 0;
5013	return ret;
5014}
5015
5016static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5017				  struct ieee80211_vif *vif,
5018				  enum ieee80211_ampdu_mlme_action action,
5019				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5020				  u8 buf_size)
5021{
5022	struct wl1271 *wl = hw->priv;
5023	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5024	int ret;
5025	u8 hlid, *ba_bitmap;
5026
5027	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5028		     tid);
5029
5030	/* sanity check - the fields in FW are only 8 bits wide */
5031	if (WARN_ON(tid > 0xFF))
5032		return -ENOTSUPP;
5033
5034	mutex_lock(&wl->mutex);
5035
5036	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5037		ret = -EAGAIN;
5038		goto out;
5039	}
5040
5041	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5042		hlid = wlvif->sta.hlid;
5043	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5044		struct wl1271_station *wl_sta;
5045
5046		wl_sta = (struct wl1271_station *)sta->drv_priv;
5047		hlid = wl_sta->hlid;
5048	} else {
5049		ret = -EINVAL;
5050		goto out;
5051	}
5052
5053	ba_bitmap = &wl->links[hlid].ba_bitmap;
5054
5055	ret = wl1271_ps_elp_wakeup(wl);
5056	if (ret < 0)
5057		goto out;
5058
5059	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5060		     tid, action);
5061
5062	switch (action) {
5063	case IEEE80211_AMPDU_RX_START:
5064		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5065			ret = -ENOTSUPP;
5066			break;
5067		}
5068
5069		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5070			ret = -EBUSY;
5071			wl1271_error("exceeded max RX BA sessions");
5072			break;
5073		}
5074
5075		if (*ba_bitmap & BIT(tid)) {
5076			ret = -EINVAL;
5077			wl1271_error("cannot enable RX BA session on active "
5078				     "tid: %d", tid);
5079			break;
5080		}
5081
5082		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5083							 hlid);
5084		if (!ret) {
5085			*ba_bitmap |= BIT(tid);
5086			wl->ba_rx_session_count++;
5087		}
5088		break;
5089
5090	case IEEE80211_AMPDU_RX_STOP:
5091		if (!(*ba_bitmap & BIT(tid))) {
5092			/*
5093			 * this happens on reconfig - so only output a debug
5094			 * message for now, and don't fail the function.
5095			 */
5096			wl1271_debug(DEBUG_MAC80211,
5097				     "no active RX BA session on tid: %d",
5098				     tid);
5099			ret = 0;
5100			break;
5101		}
5102
5103		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5104							 hlid);
5105		if (!ret) {
5106			*ba_bitmap &= ~BIT(tid);
5107			wl->ba_rx_session_count--;
5108		}
5109		break;
5110
5111	/*
5112	 * The BA initiator session is managed by the FW independently,
5113	 * so all TX AMPDU actions are rejected here on purpose.
5114	 */
5115	case IEEE80211_AMPDU_TX_START:
5116	case IEEE80211_AMPDU_TX_STOP_CONT:
5117	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5118	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5119	case IEEE80211_AMPDU_TX_OPERATIONAL:
5120		ret = -EINVAL;
5121		break;
5122
5123	default:
5124		wl1271_error("Incorrect ampdu action id=%x\n", action);
5125		ret = -EINVAL;
5126	}
5127
5128	wl1271_ps_elp_sleep(wl);
5129
5130out:
5131	mutex_unlock(&wl->mutex);
5132
5133	return ret;
5134}
5135
5136static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5137				   struct ieee80211_vif *vif,
5138				   const struct cfg80211_bitrate_mask *mask)
5139{
5140	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5141	struct wl1271 *wl = hw->priv;
5142	int i, ret = 0;
5143
5144	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5145		mask->control[NL80211_BAND_2GHZ].legacy,
5146		mask->control[NL80211_BAND_5GHZ].legacy);
5147
5148	mutex_lock(&wl->mutex);
5149
5150	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5151		wlvif->bitrate_masks[i] =
5152			wl1271_tx_enabled_rates_get(wl,
5153						    mask->control[i].legacy,
5154						    i);
5155
5156	if (unlikely(wl->state != WLCORE_STATE_ON))
5157		goto out;
5158
5159	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5160	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5161
5162		ret = wl1271_ps_elp_wakeup(wl);
5163		if (ret < 0)
5164			goto out;
5165
5166		wl1271_set_band_rate(wl, wlvif);
5167		wlvif->basic_rate =
5168			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5169		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5170
5171		wl1271_ps_elp_sleep(wl);
5172	}
5173out:
5174	mutex_unlock(&wl->mutex);
5175
5176	return ret;
5177}
5178
5179static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5180				     struct ieee80211_channel_switch *ch_switch)
5181{
5182	struct wl1271 *wl = hw->priv;
5183	struct wl12xx_vif *wlvif;
5184	int ret;
5185
5186	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5187
5188	wl1271_tx_flush(wl);
5189
5190	mutex_lock(&wl->mutex);
5191
5192	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5193		wl12xx_for_each_wlvif_sta(wl, wlvif) {
5194			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5195
5196			if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5197				continue;
5198
5199			ieee80211_chswitch_done(vif, false);
5200		}
5201		goto out;
5202	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5203		goto out;
5204	}
5205
5206	ret = wl1271_ps_elp_wakeup(wl);
5207	if (ret < 0)
5208		goto out;
5209
5210	/* TODO: change mac80211 to pass vif as param */
5211	wl12xx_for_each_wlvif_sta(wl, wlvif) {
5212		unsigned long delay_usec;
5213
5214		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5215			continue;
5216
5217		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5218		if (ret)
5219			goto out_sleep;
5220
5221		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5222
5223		/* indicate failure 5 seconds after channel switch time */
5224		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5225			     ch_switch->count;
5226		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5227				usecs_to_jiffies(delay_usec) +
5228				msecs_to_jiffies(5000));
5229	}
5230
5231out_sleep:
5232	wl1271_ps_elp_sleep(wl);
5233
5234out:
5235	mutex_unlock(&wl->mutex);
5236}
5237
5238static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5239			    u32 queues, bool drop)
5240{
5241	struct wl1271 *wl = hw->priv;
5242
5243	wl1271_tx_flush(wl);
5244}
5245
5246static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5247				       struct ieee80211_vif *vif,
5248				       struct ieee80211_channel *chan,
5249				       int duration,
5250				       enum ieee80211_roc_type type)
5251{
5252	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5253	struct wl1271 *wl = hw->priv;
5254	int channel, ret = 0;
5255
5256	channel = ieee80211_frequency_to_channel(chan->center_freq);
5257
5258	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5259		     channel, wlvif->role_id);
5260
5261	mutex_lock(&wl->mutex);
5262
5263	if (unlikely(wl->state != WLCORE_STATE_ON))
5264		goto out;
5265
5266	/* return EBUSY if we can't ROC right now */
5267	if (WARN_ON(wl->roc_vif ||
5268		    find_first_bit(wl->roc_map,
5269				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5270		ret = -EBUSY;
5271		goto out;
5272	}
5273
5274	ret = wl1271_ps_elp_wakeup(wl);
5275	if (ret < 0)
5276		goto out;
5277
5278	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5279	if (ret < 0)
5280		goto out_sleep;
5281
5282	wl->roc_vif = vif;
5283	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5284				     msecs_to_jiffies(duration));
5285out_sleep:
5286	wl1271_ps_elp_sleep(wl);
5287out:
5288	mutex_unlock(&wl->mutex);
5289	return ret;
5290}
5291
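/* Finish a remain-on-channel request. Called with wl->mutex held. */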
5292static int __wlcore_roc_completed(struct wl1271 *wl)
5293{
5294	struct wl12xx_vif *wlvif;
5295	int ret;
5296
5297	/* already completed */
5298	if (unlikely(!wl->roc_vif))
5299		return 0;
5300
5301	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5302
5303	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5304		return -EBUSY;
5305
5306	ret = wl12xx_stop_dev(wl, wlvif);
5307	if (ret < 0)
5308		return ret;
5309
5310	wl->roc_vif = NULL;
5311
5312	return 0;
5313}
5314
5315static int wlcore_roc_completed(struct wl1271 *wl)
5316{
5317	int ret;
5318
5319	wl1271_debug(DEBUG_MAC80211, "roc complete");
5320
5321	mutex_lock(&wl->mutex);
5322
5323	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5324		ret = -EBUSY;
5325		goto out;
5326	}
5327
5328	ret = wl1271_ps_elp_wakeup(wl);
5329	if (ret < 0)
5330		goto out;
5331
5332	ret = __wlcore_roc_completed(wl);
5333
5334	wl1271_ps_elp_sleep(wl);
5335out:
5336	mutex_unlock(&wl->mutex);
5337
5338	return ret;
5339}
5340
5341static void wlcore_roc_complete_work(struct work_struct *work)
5342{
5343	struct delayed_work *dwork;
5344	struct wl1271 *wl;
5345	int ret;
5346
5347	dwork = container_of(work, struct delayed_work, work);
5348	wl = container_of(dwork, struct wl1271, roc_complete_work);
5349
5350	ret = wlcore_roc_completed(wl);
5351	if (!ret)
5352		ieee80211_remain_on_channel_expired(wl->hw);
5353}
5354
5355static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5356{
5357	struct wl1271 *wl = hw->priv;
5358
5359	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5360
5361	/* TODO: per-vif */
5362	wl1271_tx_flush(wl);
5363
5364	/*
5365	 * we can't just flush_work here, because it might deadlock
5366	 * (as we might get called from the same workqueue)
5367	 */
5368	cancel_delayed_work_sync(&wl->roc_complete_work);
5369	wlcore_roc_completed(wl);
5370
5371	return 0;
5372}
5373
5374static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5375				    struct ieee80211_vif *vif,
5376				    struct ieee80211_sta *sta,
5377				    u32 changed)
5378{
5379	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5380	struct wl1271 *wl = hw->priv;
5381
5382	wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5383}
5384
5385static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5386			       struct ieee80211_vif *vif,
5387			       struct ieee80211_sta *sta,
5388			       s8 *rssi_dbm)
5389{
5390	struct wl1271 *wl = hw->priv;
5391	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5392	int ret = 0;
5393
5394	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5395
5396	mutex_lock(&wl->mutex);
5397
5398	if (unlikely(wl->state != WLCORE_STATE_ON))
5399		goto out;
5400
5401	ret = wl1271_ps_elp_wakeup(wl);
5402	if (ret < 0)
5403		goto out_sleep;
5404
5405	ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5406	if (ret < 0)
5407		goto out_sleep;
5408
5409out_sleep:
5410	wl1271_ps_elp_sleep(wl);
5411
5412out:
5413	mutex_unlock(&wl->mutex);
5414
5415	return ret;
5416}
5417
5418static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5419{
5420	struct wl1271 *wl = hw->priv;
5421	bool ret = false;
5422
5423	mutex_lock(&wl->mutex);
5424
5425	if (unlikely(wl->state != WLCORE_STATE_ON))
5426		goto out;
5427
5428	/* packets are considered pending if in the TX queue or the FW */
5429	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5430out:
5431	mutex_unlock(&wl->mutex);
5432
5433	return ret;
5434}
5435
5436/* can't be const, mac80211 writes to this */
5437static struct ieee80211_rate wl1271_rates[] = {
5438	{ .bitrate = 10,
5439	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5440	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5441	{ .bitrate = 20,
5442	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5443	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5444	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5445	{ .bitrate = 55,
5446	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5447	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5448	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5449	{ .bitrate = 110,
5450	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5451	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5452	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5453	{ .bitrate = 60,
5454	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5455	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5456	{ .bitrate = 90,
5457	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5458	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5459	{ .bitrate = 120,
5460	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5461	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5462	{ .bitrate = 180,
5463	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5464	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5465	{ .bitrate = 240,
5466	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5467	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5468	{ .bitrate = 360,
5469	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5470	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5471	{ .bitrate = 480,
5472	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5473	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5474	{ .bitrate = 540,
5475	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5476	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5477};
5478
5479/* can't be const, mac80211 writes to this */
5480static struct ieee80211_channel wl1271_channels[] = {
5481	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5482	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5483	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5484	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5485	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5486	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5487	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5488	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5489	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5490	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5491	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5492	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5493	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5494	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5495};
5496
5497/* can't be const, mac80211 writes to this */
5498static struct ieee80211_supported_band wl1271_band_2ghz = {
5499	.channels = wl1271_channels,
5500	.n_channels = ARRAY_SIZE(wl1271_channels),
5501	.bitrates = wl1271_rates,
5502	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5503};
5504
5505/* 5 GHz data rates for WL1273 */
5506static struct ieee80211_rate wl1271_rates_5ghz[] = {
5507	{ .bitrate = 60,
5508	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5509	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5510	{ .bitrate = 90,
5511	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5512	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5513	{ .bitrate = 120,
5514	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5515	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5516	{ .bitrate = 180,
5517	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5518	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5519	{ .bitrate = 240,
5520	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5521	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5522	{ .bitrate = 360,
5523	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5524	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5525	{ .bitrate = 480,
5526	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5527	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5528	{ .bitrate = 540,
5529	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5530	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5531};
5532
5533/* 5 GHz band channels for WL1273 */
5534static struct ieee80211_channel wl1271_channels_5ghz[] = {
5535	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5536	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5537	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5538	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5539	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5540	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5541	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5542	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5543	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5544	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5545	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5546	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5547	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5548	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5549	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5550	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5551	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5552	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5553	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5554	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5555	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5556	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5557	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5558	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5559	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5560	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5561	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5562	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5563	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5564	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5565	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5566};
5567
5568static struct ieee80211_supported_band wl1271_band_5ghz = {
5569	.channels = wl1271_channels_5ghz,
5570	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5571	.bitrates = wl1271_rates_5ghz,
5572	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5573};
5574
5575static const struct ieee80211_ops wl1271_ops = {
5576	.start = wl1271_op_start,
5577	.stop = wlcore_op_stop,
5578	.add_interface = wl1271_op_add_interface,
5579	.remove_interface = wl1271_op_remove_interface,
5580	.change_interface = wl12xx_op_change_interface,
5581#ifdef CONFIG_PM
5582	.suspend = wl1271_op_suspend,
5583	.resume = wl1271_op_resume,
5584#endif
5585	.config = wl1271_op_config,
5586	.prepare_multicast = wl1271_op_prepare_multicast,
5587	.configure_filter = wl1271_op_configure_filter,
5588	.tx = wl1271_op_tx,
5589	.set_key = wlcore_op_set_key,
5590	.hw_scan = wl1271_op_hw_scan,
5591	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5592	.sched_scan_start = wl1271_op_sched_scan_start,
5593	.sched_scan_stop = wl1271_op_sched_scan_stop,
5594	.bss_info_changed = wl1271_op_bss_info_changed,
5595	.set_frag_threshold = wl1271_op_set_frag_threshold,
5596	.set_rts_threshold = wl1271_op_set_rts_threshold,
5597	.conf_tx = wl1271_op_conf_tx,
5598	.get_tsf = wl1271_op_get_tsf,
5599	.get_survey = wl1271_op_get_survey,
5600	.sta_state = wl12xx_op_sta_state,
5601	.ampdu_action = wl1271_op_ampdu_action,
5602	.tx_frames_pending = wl1271_tx_frames_pending,
5603	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5604	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5605	.channel_switch = wl12xx_op_channel_switch,
5606	.flush = wlcore_op_flush,
5607	.remain_on_channel = wlcore_op_remain_on_channel,
5608	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5609	.add_chanctx = wlcore_op_add_chanctx,
5610	.remove_chanctx = wlcore_op_remove_chanctx,
5611	.change_chanctx = wlcore_op_change_chanctx,
5612	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5613	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5614	.sta_rc_update = wlcore_op_sta_rc_update,
5615	.get_rssi = wlcore_op_get_rssi,
5616	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5617};
5618
5619
5620u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5621{
5622	u8 idx;
5623
5624	BUG_ON(band >= 2);
5625
5626	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5627		wl1271_error("Illegal RX rate from HW: %d", rate);
5628		return 0;
5629	}
5630
5631	idx = wl->band_rate_to_idx[band][rate];
5632	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5633		wl1271_error("Unsupported RX rate from HW: %d", rate);
5634		return 0;
5635	}
5636
5637	return idx;
5638}
5639
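/*
 * Derive the set of MAC addresses exposed to mac80211 from the base
 * OUI/NIC parts, incrementing the NIC part for each address.
 */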
5640static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5641{
5642	int i;
5643
5644	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5645		     oui, nic);
5646
5647	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5648		wl1271_warning("NIC part of the MAC address wraps around!");
5649
5650	for (i = 0; i < wl->num_mac_addr; i++) {
5651		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5652		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5653		wl->addresses[i].addr[2] = (u8) oui;
5654		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5655		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5656		wl->addresses[i].addr[5] = (u8) nic;
5657		nic++;
5658	}
5659
5660	/* we may be short by at most one address */
5661	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5662
5663	/*
5664	 * copy the first address, turn on its LAA bit and use the
5665	 * result as the last address.
5666	 */
5667	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5668		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5669		memcpy(&wl->addresses[idx], &wl->addresses[0],
5670		       sizeof(wl->addresses[0]));
5671		/* LAA bit */
5672		wl->addresses[idx].addr[0] |= BIT(1);
5673	}
5674
5675	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5676	wl->hw->wiphy->addresses = wl->addresses;
5677}
5678
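/*
 * Power the chip on briefly to read the chip ID, the PG version and
 * (if supported) the fused MAC address.
 */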
5679static int wl12xx_get_hw_info(struct wl1271 *wl)
5680{
5681	int ret;
5682
5683	ret = wl12xx_set_power_on(wl);
5684	if (ret < 0)
5685		return ret;
5686
5687	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5688	if (ret < 0)
5689		goto out;
5690
5691	wl->fuse_oui_addr = 0;
5692	wl->fuse_nic_addr = 0;
5693
5694	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5695	if (ret < 0)
5696		goto out;
5697
5698	if (wl->ops->get_mac)
5699		ret = wl->ops->get_mac(wl);
5700
5701out:
5702	wl1271_power_off(wl);
5703	return ret;
5704}
5705
5706static int wl1271_register_hw(struct wl1271 *wl)
5707{
5708	int ret;
5709	u32 oui_addr = 0, nic_addr = 0;
5710
5711	if (wl->mac80211_registered)
5712		return 0;
5713
5714	if (wl->nvs_len >= 12) {
5715		/* NOTE: we assume the wl->nvs->nvs element is at the
5716		 * beginning of the wl->nvs structure, in order to
5717		 * simplify the casting below.
5718		 */
5719		u8 *nvs_ptr = (u8 *)wl->nvs;
5720
5721		oui_addr =
5722			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5723		nic_addr =
5724			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5725	}
5726
5727	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
5728	if (oui_addr == 0 && nic_addr == 0) {
5729		oui_addr = wl->fuse_oui_addr;
5730		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
5731		nic_addr = wl->fuse_nic_addr + 1;
5732	}
5733
5734	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5735
5736	ret = ieee80211_register_hw(wl->hw);
5737	if (ret < 0) {
5738		wl1271_error("unable to register mac80211 hw: %d", ret);
5739		goto out;
5740	}
5741
5742	wl->mac80211_registered = true;
5743
5744	wl1271_debugfs_init(wl);
5745
5746	wl1271_notice("loaded");
5747
5748out:
5749	return ret;
5750}
5751
5752static void wl1271_unregister_hw(struct wl1271 *wl)
5753{
5754	if (wl->plt)
5755		wl1271_plt_stop(wl);
5756
5757	ieee80211_unregister_hw(wl->hw);
5758	wl->mac80211_registered = false;
5759
5760}
5761
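/*
 * Fill in the ieee80211_hw and wiphy capabilities (cipher suites,
 * interface modes, scan limits, bands, queues) before registering
 * with mac80211.
 */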
5762static int wl1271_init_ieee80211(struct wl1271 *wl)
5763{
5764	int i;
5765	static const u32 cipher_suites[] = {
5766		WLAN_CIPHER_SUITE_WEP40,
5767		WLAN_CIPHER_SUITE_WEP104,
5768		WLAN_CIPHER_SUITE_TKIP,
5769		WLAN_CIPHER_SUITE_CCMP,
5770		WL1271_CIPHER_SUITE_GEM,
5771	};
5772
5773	/* The tx descriptor buffer */
5774	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5775
5776	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5777		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5778
5779	/* unit us */
5780	/* FIXME: find a proper value */
5781	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5782
5783	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5784		IEEE80211_HW_SUPPORTS_PS |
5785		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5786		IEEE80211_HW_SUPPORTS_UAPSD |
5787		IEEE80211_HW_HAS_RATE_CONTROL |
5788		IEEE80211_HW_CONNECTION_MONITOR |
5789		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5790		IEEE80211_HW_SPECTRUM_MGMT |
5791		IEEE80211_HW_AP_LINK_PS |
5792		IEEE80211_HW_AMPDU_AGGREGATION |
5793		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5794		IEEE80211_HW_QUEUE_CONTROL |
5795		IEEE80211_HW_CHANCTX_STA_CSA;
5796
5797	wl->hw->wiphy->cipher_suites = cipher_suites;
5798	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5799
5800	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5801		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5802		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5803	wl->hw->wiphy->max_scan_ssids = 1;
5804	wl->hw->wiphy->max_sched_scan_ssids = 16;
5805	wl->hw->wiphy->max_match_sets = 16;
5806	/*
5807	 * The maximum length of IEs in a scan probe request template
5808	 * is the maximum possible template length, minus the
5809	 * IEEE 802.11 header of the template.
5810	 */
5811	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5812			sizeof(struct ieee80211_header);
5813
5814	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5815		sizeof(struct ieee80211_header);
5816
5817	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
5818
5819	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5820				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5821				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5822
5823	/* make sure all our channels fit in the scanned_ch bitmask */
5824	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5825		     ARRAY_SIZE(wl1271_channels_5ghz) >
5826		     WL1271_MAX_CHANNELS);
5827	/*
5828	 * clear channel flags from the previous usage
5829	 * and restore max_power & max_antenna_gain values.
5830	 */
5831	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5832		wl1271_band_2ghz.channels[i].flags = 0;
5833		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5834		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5835	}
5836
5837	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5838		wl1271_band_5ghz.channels[i].flags = 0;
5839		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5840		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5841	}
5842
5843	/*
5844	 * We keep local copies of the band structs because we need to
5845	 * modify them on a per-device basis.
5846	 */
5847	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5848	       sizeof(wl1271_band_2ghz));
5849	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5850	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
5851	       sizeof(*wl->ht_cap));
5852	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5853	       sizeof(wl1271_band_5ghz));
5854	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5855	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
5856	       sizeof(*wl->ht_cap));
5857
5858	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5859		&wl->bands[IEEE80211_BAND_2GHZ];
5860	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5861		&wl->bands[IEEE80211_BAND_5GHZ];
5862
5863	/*
5864	 * Allow NUM_TX_QUEUES data queues plus one CAB queue per supported
5865	 * MAC address, plus one global off-channel Tx queue.
5866	 */
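	/*
	 * E.g., assuming NUM_TX_QUEUES is 4 and WLCORE_NUM_MAC_ADDRESSES is 3
	 * (as they presumably are), this comes to (4 + 1) * 3 + 1 = 16 queues.
	 */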
5867	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5868
5869	/* the last queue is the offchannel queue */
5870	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5871	wl->hw->max_rates = 1;
5872
5873	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5874
5875	/* the FW answers probe requests in AP mode */
5876	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5877	wl->hw->wiphy->probe_resp_offload =
5878		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5879		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5880		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5881
5882	/* allowed interface combinations */
5883	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
5884	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
5885
5886	/* register vendor commands */
5887	wlcore_set_vendor_commands(wl->hw->wiphy);
5888
5889	SET_IEEE80211_DEV(wl->hw, wl->dev);
5890
5891	wl->hw->sta_data_size = sizeof(struct wl1271_station);
5892	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5893
5894	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5895
5896	return 0;
5897}
5898
5899struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5900				     u32 mbox_size)
5901{
5902	struct ieee80211_hw *hw;
5903	struct wl1271 *wl;
5904	int i, j, ret;
5905	unsigned int order;
5906
5907	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5908	if (!hw) {
5909		wl1271_error("could not alloc ieee80211_hw");
5910		ret = -ENOMEM;
5911		goto err_hw_alloc;
5912	}
5913
5914	wl = hw->priv;
5915	memset(wl, 0, sizeof(*wl));
5916
5917	wl->priv = kzalloc(priv_size, GFP_KERNEL);
5918	if (!wl->priv) {
5919		wl1271_error("could not alloc wl priv");
5920		ret = -ENOMEM;
5921		goto err_priv_alloc;
5922	}
5923
5924	INIT_LIST_HEAD(&wl->wlvif_list);
5925
5926	wl->hw = hw;
5927
5928	/*
5929	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
5930	 * No additional resources are allocated here, so that's fine.
5931	 */
5932	for (i = 0; i < NUM_TX_QUEUES; i++)
5933		for (j = 0; j < WLCORE_MAX_LINKS; j++)
5934			skb_queue_head_init(&wl->links[j].tx_queue[i]);
5935
5936	skb_queue_head_init(&wl->deferred_rx_queue);
5937	skb_queue_head_init(&wl->deferred_tx_queue);
5938
5939	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5940	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5941	INIT_WORK(&wl->tx_work, wl1271_tx_work);
5942	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5943	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5944	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5945	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5946
5947	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5948	if (!wl->freezable_wq) {
5949		ret = -ENOMEM;
5950		goto err_hw;
5951	}
5952
5953	wl->channel = 0;
5954	wl->rx_counter = 0;
5955	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5956	wl->band = IEEE80211_BAND_2GHZ;
5957	wl->channel_type = NL80211_CHAN_NO_HT;
5958	wl->flags = 0;
5959	wl->sg_enabled = true;
5960	wl->sleep_auth = WL1271_PSM_ILLEGAL;
5961	wl->recovery_count = 0;
5962	wl->hw_pg_ver = -1;
5963	wl->ap_ps_map = 0;
5964	wl->ap_fw_ps_map = 0;
5965	wl->quirks = 0;
5966	wl->platform_quirks = 0;
5967	wl->system_hlid = WL12XX_SYSTEM_HLID;
5968	wl->active_sta_count = 0;
5969	wl->active_link_count = 0;
5970	wl->fwlog_size = 0;
5971	init_waitqueue_head(&wl->fwlog_waitq);
5972
5973	/* The system link is always allocated */
5974	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5975
5976	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5977	for (i = 0; i < wl->num_tx_desc; i++)
5978		wl->tx_frames[i] = NULL;
5979
5980	spin_lock_init(&wl->wl_lock);
5981
5982	wl->state = WLCORE_STATE_OFF;
5983	wl->fw_type = WL12XX_FW_TYPE_NONE;
5984	mutex_init(&wl->mutex);
5985	mutex_init(&wl->flush_mutex);
5986	init_completion(&wl->nvs_loading_complete);
5987
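	/*
	 * The Rx/Tx aggregation buffer is allocated as whole, physically
	 * contiguous pages; get_order() rounds aggr_buf_size up to the
	 * nearest power-of-two number of pages.
	 */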
5988	order = get_order(aggr_buf_size);
5989	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5990	if (!wl->aggr_buf) {
5991		ret = -ENOMEM;
5992		goto err_wq;
5993	}
5994	wl->aggr_buf_size = aggr_buf_size;
5995
5996	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5997	if (!wl->dummy_packet) {
5998		ret = -ENOMEM;
5999		goto err_aggr;
6000	}
6001
6002	/* Allocate one page for the FW log */
6003	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6004	if (!wl->fwlog) {
6005		ret = -ENOMEM;
6006		goto err_dummy_packet;
6007	}
6008
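	/*
	 * Event mailbox buffer; kept in DMA-able memory (GFP_DMA) since the
	 * bus driver may DMA the mailbox contents directly into it.
	 */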
6009	wl->mbox_size = mbox_size;
6010	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6011	if (!wl->mbox) {
6012		ret = -ENOMEM;
6013		goto err_fwlog;
6014	}
6015
6016	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6017	if (!wl->buffer_32) {
6018		ret = -ENOMEM;
6019		goto err_mbox;
6020	}
6021
6022	return hw;
6023
6024err_mbox:
6025	kfree(wl->mbox);
6026
6027err_fwlog:
6028	free_page((unsigned long)wl->fwlog);
6029
6030err_dummy_packet:
6031	dev_kfree_skb(wl->dummy_packet);
6032
6033err_aggr:
6034	free_pages((unsigned long)wl->aggr_buf, order);
6035
6036err_wq:
6037	destroy_workqueue(wl->freezable_wq);
6038
6039err_hw:
6040	wl1271_debugfs_exit(wl);
6041	kfree(wl->priv);
6042
6043err_priv_alloc:
6044	ieee80211_free_hw(hw);
6045
6046err_hw_alloc:
6047
6048	return ERR_PTR(ret);
6049}
6050EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6051
6052int wlcore_free_hw(struct wl1271 *wl)
6053{
6054	/* Unblock any fwlog readers */
6055	mutex_lock(&wl->mutex);
6056	wl->fwlog_size = -1;
6057	wake_up_interruptible_all(&wl->fwlog_waitq);
6058	mutex_unlock(&wl->mutex);
6059
6060	wlcore_sysfs_free(wl);
6061
6062	kfree(wl->buffer_32);
6063	kfree(wl->mbox);
6064	free_page((unsigned long)wl->fwlog);
6065	dev_kfree_skb(wl->dummy_packet);
6066	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6067
6068	wl1271_debugfs_exit(wl);
6069
6070	vfree(wl->fw);
6071	wl->fw = NULL;
6072	wl->fw_type = WL12XX_FW_TYPE_NONE;
6073	kfree(wl->nvs);
6074	wl->nvs = NULL;
6075
6076	kfree(wl->raw_fw_status);
6077	kfree(wl->fw_status);
6078	kfree(wl->tx_res_if);
6079	destroy_workqueue(wl->freezable_wq);
6080
6081	kfree(wl->priv);
6082	ieee80211_free_hw(wl->hw);
6083
6084	return 0;
6085}
6086EXPORT_SYMBOL_GPL(wlcore_free_hw);
6087
6088#ifdef CONFIG_PM
6089static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6090	.flags = WIPHY_WOWLAN_ANY,
6091	.n_patterns = WL1271_MAX_RX_FILTERS,
6092	.pattern_min_len = 1,
6093	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6094};
6095#endif
6096
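/*
 * On edge-triggered IRQ platforms the primary handler does nothing but wake
 * the threaded handler (wlcore_irq); see the request_threaded_irq() call in
 * wlcore_nvs_cb().
 */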
6097static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6098{
6099	return IRQ_WAKE_THREAD;
6100}
6101
6102static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6103{
6104	struct wl1271 *wl = context;
6105	struct platform_device *pdev = wl->pdev;
6106	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6107	struct wl12xx_platform_data *pdata = pdev_data->pdata;
6108	unsigned long irqflags;
6109	int ret;
6110	irq_handler_t hardirq_fn = NULL;
6111
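	/*
	 * The NVS (calibration) file is optional: if it could not be loaded,
	 * continue with wl->nvs == NULL and an empty length.
	 */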
6112	if (fw) {
6113		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6114		if (!wl->nvs) {
6115			wl1271_error("Could not allocate nvs data");
6116			goto out;
6117		}
6118		wl->nvs_len = fw->size;
6119	} else {
6120		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6121			     WL12XX_NVS_NAME);
6122		wl->nvs = NULL;
6123		wl->nvs_len = 0;
6124	}
6125
6126	ret = wl->ops->setup(wl);
6127	if (ret < 0)
6128		goto out_free_nvs;
6129
6130	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6131
6132	/* adjust some runtime configuration parameters */
6133	wlcore_adjust_conf(wl);
6134
6135	wl->irq = platform_get_irq(pdev, 0);
6136	wl->platform_quirks = pdata->platform_quirks;
6137	wl->if_ops = pdev_data->if_ops;
6138
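	/*
	 * Edge-triggered IRQs need a primary handler that defers straight to
	 * the IRQ thread; level-triggered IRQs use IRQF_ONESHOT with the
	 * default primary handler.
	 */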
6139	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6140		irqflags = IRQF_TRIGGER_RISING;
6141		hardirq_fn = wlcore_hardirq;
6142	} else {
6143		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6144	}
6145
6146	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6147				   irqflags, pdev->name, wl);
6148	if (ret < 0) {
6149		wl1271_error("request_irq() failed: %d", ret);
6150		goto out_free_nvs;
6151	}
6152
6153#ifdef CONFIG_PM
6154	ret = enable_irq_wake(wl->irq);
6155	if (!ret) {
6156		wl->irq_wake_enabled = true;
6157		device_init_wakeup(wl->dev, 1);
6158		if (pdata->pwr_in_suspend)
6159			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6160	}
6161#endif
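	/*
	 * Keep the IRQ disabled for now; it is enabled later, once the chip
	 * is booted and interrupts can actually be serviced.
	 */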
6162	disable_irq(wl->irq);
6163
6164	ret = wl12xx_get_hw_info(wl);
6165	if (ret < 0) {
6166		wl1271_error("couldn't get hw info");
6167		goto out_irq;
6168	}
6169
6170	ret = wl->ops->identify_chip(wl);
6171	if (ret < 0)
6172		goto out_irq;
6173
6174	ret = wl1271_init_ieee80211(wl);
6175	if (ret)
6176		goto out_irq;
6177
6178	ret = wl1271_register_hw(wl);
6179	if (ret)
6180		goto out_irq;
6181
6182	ret = wlcore_sysfs_init(wl);
6183	if (ret)
6184		goto out_unreg;
6185
6186	wl->initialized = true;
6187	goto out;
6188
6189out_unreg:
6190	wl1271_unregister_hw(wl);
6191
6192out_irq:
6193	free_irq(wl->irq, wl);
6194
6195out_free_nvs:
6196	kfree(wl->nvs);
6197
6198out:
6199	release_firmware(fw);
6200	complete_all(&wl->nvs_loading_complete);
6201}
6202
6203int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6204{
6205	int ret;
6206
6207	if (!wl->ops || !wl->ptable)
6208		return -EINVAL;
6209
6210	wl->dev = &pdev->dev;
6211	wl->pdev = pdev;
6212	platform_set_drvdata(pdev, wl);
6213
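	/*
	 * Request the NVS file asynchronously; probing continues in
	 * wlcore_nvs_cb() once the request completes, with or without a file.
	 */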
6214	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6215				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6216				      wl, wlcore_nvs_cb);
6217	if (ret < 0) {
6218		wl1271_error("request_firmware_nowait failed: %d", ret);
6219		complete_all(&wl->nvs_loading_complete);
6220	}
6221
6222	return ret;
6223}
6224EXPORT_SYMBOL_GPL(wlcore_probe);
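/*
 * Illustrative sketch only (identifiers are approximate, borrowed from the
 * wl12xx lower driver): a chip-specific driver pairs wlcore_alloc_hw() with
 * wlcore_probe() roughly like this:
 *
 *	hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
 *			     WL12XX_AGGR_BUFFER_SIZE,
 *			     sizeof(struct wl12xx_event_mailbox));
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 *	wl = hw->priv;
 *	wl->ops = &wl12xx_ops;
 *	wl->ptable = wl12xx_ptable;
 *	ret = wlcore_probe(wl, pdev);
 */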
6225
6226int wlcore_remove(struct platform_device *pdev)
6227{
6228	struct wl1271 *wl = platform_get_drvdata(pdev);
6229
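	/*
	 * Wait for wlcore_nvs_cb() to finish; if initialization never
	 * completed there is nothing to tear down.
	 */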
6230	wait_for_completion(&wl->nvs_loading_complete);
6231	if (!wl->initialized)
6232		return 0;
6233
6234	if (wl->irq_wake_enabled) {
6235		device_init_wakeup(wl->dev, 0);
6236		disable_irq_wake(wl->irq);
6237	}
6238	wl1271_unregister_hw(wl);
6239	free_irq(wl->irq, wl);
6240	wlcore_free_hw(wl);
6241
6242	return 0;
6243}
6244EXPORT_SYMBOL_GPL(wlcore_remove);
6245
6246u32 wl12xx_debug_level = DEBUG_NONE;
6247EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6248module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6249MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6250
6251module_param_named(fwlog, fwlog_param, charp, 0);
6252MODULE_PARM_DESC(fwlog,
6253		 "FW logger options: continuous, ondemand, dbgpins or disable");
6254
6255module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6256MODULE_PARM_DESC(fwlog_mem_blocks, "Number of FW logger memory blocks");
6257
6258module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6259MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6260
6261module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6262MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6263
6264MODULE_LICENSE("GPL");
6265MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6266MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6267MODULE_FIRMWARE(WL12XX_NVS_NAME);
6268