dhd_sdio.c revision 5aa9f0ea18f3d5ec329a619b0bc54e214e02bc33
1/*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/kthread.h>
20#include <linux/printk.h>
21#include <linux/pci_ids.h>
22#include <linux/netdevice.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/mmc/sdio.h>
26#include <linux/mmc/sdio_ids.h>
27#include <linux/mmc/sdio_func.h>
28#include <linux/mmc/card.h>
29#include <linux/semaphore.h>
30#include <linux/firmware.h>
31#include <linux/module.h>
32#include <linux/bcma/bcma.h>
33#include <linux/debugfs.h>
34#include <linux/vmalloc.h>
35#include <linux/platform_data/brcmfmac-sdio.h>
36#include <linux/moduleparam.h>
37#include <asm/unaligned.h>
38#include <defs.h>
39#include <brcmu_wifi.h>
40#include <brcmu_utils.h>
41#include <brcm_hw_ids.h>
42#include <soc.h>
43#include "sdio_host.h"
44#include "chip.h"
45#include "nvram.h"
46
47#define DCMD_RESP_TIMEOUT  2000	/* In milliseconds */
48
49#ifdef DEBUG
50
51#define BRCMF_TRAP_INFO_SIZE	80
52
53#define CBUF_LEN	(128)
54
55/* Device console log buffer state */
56#define CONSOLE_BUFFER_MAX	2024
57
58struct rte_log_le {
59	__le32 buf;		/* Can't be pointer on (64-bit) hosts */
60	__le32 buf_size;
61	__le32 idx;
62	char *_buf_compat;	/* Redundant pointer for backward compat. */
63};
64
65struct rte_console {
66	/* Virtual UART
67	 * When there is no UART (e.g. Quickturn),
68	 * the host should write a complete
69	 * input line directly into cbuf and then write
70	 * the length into vcons_in.
71	 * This may also be used when there is a real UART
72	 * (at risk of conflicting with
73	 * the real UART).  vcons_out is currently unused.
74	 */
75	uint vcons_in;
76	uint vcons_out;
77
78	/* Output (logging) buffer
79	 * Console output is written to a ring buffer log_buf at index log_idx.
80	 * The host may read the output when it sees log_idx advance.
81	 * Output will be lost if the output wraps around faster than the host
82	 * polls.
83	 */
84	struct rte_log_le log_le;
85
86	/* Console input line buffer
87	 * Characters are read one at a time into cbuf
88	 * until <CR> is received, then
89	 * the buffer is processed as a command line.
90	 * Also used for virtual UART.
91	 */
92	uint cbuf_idx;
93	char cbuf[CBUF_LEN];
94};
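
/*
 * Editor's note: a rough host-side sketch (hypothetical helper name
 * read_dongle_mem) of consuming the output ring described above --
 * poll log_le.idx and emit any bytes written since the last poll:
 *
 *	read_dongle_mem(cons_addr + offsetof(struct rte_console, log_le),
 *			&log, sizeof(log));
 *	while (last != le32_to_cpu(log.idx)) {
 *		read_dongle_mem(le32_to_cpu(log.buf) + last, &ch, 1);
 *		pr_cont("%c", ch);
 *		last = (last + 1) % le32_to_cpu(log.buf_size);
 *	}
 */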
95
96#endif				/* DEBUG */
97#include <chipcommon.h>
98
99#include "dhd_bus.h"
100#include "dhd_dbg.h"
101#include "tracepoint.h"
102
103#define TXQLEN		2048	/* bulk tx queue length */
104#define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
105#define TXLOW		(TXHI - 256)	/* turn off flow control below TXLOW */
106#define PRIOMASK	7
107
108#define TXRETRIES	2	/* # of retries for tx frames */
109
110#define BRCMF_RXBOUND	50	/* Default for max rx frames in
111				 one scheduling */
112
113#define BRCMF_TXBOUND	20	/* Default for max tx frames in
114				 one scheduling */
115
116#define BRCMF_DEFAULT_TXGLOM_SIZE	32  /* max tx frames in glom chain */
117
118#define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */
119
120#define MEMBLOCK	2048	/* Block size used for downloading
121				 of dongle image */
122#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
123				 biggest possible glom */
124
125#define BRCMF_FIRSTREAD	(1 << 6)
126
127
128/* SBSDIO_DEVICE_CTL */
129
130/* 1: device will assert busy signal when receiving CMD53 */
131#define SBSDIO_DEVCTL_SETBUSY		0x01
132/* 1: assertion of sdio interrupt is synchronous to the sdio clock */
133#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02
134/* 1: mask all interrupts to host except the chipActive (rev 8) */
135#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04
136/* 1: isolate internal sdio signals, put external pads in tri-state; requires
137 * sdio bus power cycle to clear (rev 9) */
138#define SBSDIO_DEVCTL_PADS_ISO		0x08
139/* Force SD->SB reset mapping (rev 11) */
140#define SBSDIO_DEVCTL_SB_RST_CTL	0x30
141/*   Determined by CoreControl bit */
142#define SBSDIO_DEVCTL_RST_CORECTL	0x00
143/*   Force backplane reset */
144#define SBSDIO_DEVCTL_RST_BPRESET	0x10
145/*   Force no backplane reset */
146#define SBSDIO_DEVCTL_RST_NOBPRESET	0x20
147
148/* direct(mapped) cis space */
149
150/* MAPPED common CIS address */
151#define SBSDIO_CIS_BASE_COMMON		0x1000
152/* maximum bytes in one CIS */
153#define SBSDIO_CIS_SIZE_LIMIT		0x200
154/* cis offset addr is < 17 bits */
155#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF
156
157/* manfid tuple length, include tuple, link bytes */
158#define SBSDIO_CIS_MANFID_TUPLE_LEN	6
159
160#define CORE_BUS_REG(base, field) \
161		(base + offsetof(struct sdpcmd_regs, field))
162
163/* SDIO function 1 register CHIPCLKCSR */
164/* Force ALP request to backplane */
165#define SBSDIO_FORCE_ALP		0x01
166/* Force HT request to backplane */
167#define SBSDIO_FORCE_HT			0x02
168/* Force ILP request to backplane */
169#define SBSDIO_FORCE_ILP		0x04
170/* Make ALP ready (power up xtal) */
171#define SBSDIO_ALP_AVAIL_REQ		0x08
172/* Make HT ready (power up PLL) */
173#define SBSDIO_HT_AVAIL_REQ		0x10
174/* Squelch clock requests from HW */
175#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20
176/* Status: ALP is ready */
177#define SBSDIO_ALP_AVAIL		0x40
178/* Status: HT is ready */
179#define SBSDIO_HT_AVAIL			0x80
180#define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
181#define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
182#define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
183#define SBSDIO_ALPONLY(regval)	(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
184#define SBSDIO_CLKAV(regval, alponly) \
185	(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
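
/*
 * Editor's note: an illustrative reading of the status macros above (a
 * sketch, not part of the original source). With clkctl == 0x40 only
 * ALP is up, so SBSDIO_ALPONLY() is true; with clkctl == 0xC0 both ALP
 * and HT are available and SBSDIO_CLKAV(clkctl, false) reports the full
 * backplane clock as ready:
 *
 *	u8 clkctl = SBSDIO_ALP_AVAIL;			// 0x40
 *	WARN_ON(!SBSDIO_ALPONLY(clkctl));
 *	clkctl |= SBSDIO_HT_AVAIL;			// 0xC0
 *	WARN_ON(!SBSDIO_CLKAV(clkctl, false));
 */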
186
187/* intstatus */
188#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
189#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
190#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
191#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
192#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
193#define I_SMB_SW_SHIFT	0	/* To SB Mail S/W interrupts shift */
194#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
195#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
196#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
197#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
198#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
199#define I_HMB_SW_SHIFT	4	/* To Host Mail S/W interrupts shift */
200#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
201#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
202#define	I_PC		(1 << 10)	/* descriptor error */
203#define	I_PD		(1 << 11)	/* data error */
204#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
205#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
206#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
207#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
208#define	I_RI		(1 << 16)	/* Receive Interrupt */
209#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
210#define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
211#define	I_XI		(1 << 24)	/* Transmit Interrupt */
212#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
213#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
214#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
215#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
216#define I_CHIPACTIVE	(1 << 29)	/* chip from doze to active state */
217#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
218#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
219#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
220#define I_DMA		(I_RI | I_XI | I_ERRORS)
221
222/* corecontrol */
223#define CC_CISRDY		(1 << 0)	/* CIS Ready */
224#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal */
225#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
226#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation */
227#define CC_XMTDATAAVAIL_MODE	(1 << 4)
228#define CC_XMTDATAAVAIL_CTRL	(1 << 5)
229
230/* SDA_FRAMECTRL */
231#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
232#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
233#define SFC_CRC4WOOS	(1 << 2)	/* CRC error for write out of sync */
234#define SFC_ABORTALL	(1 << 3)	/* Abort all in-progress frames */
235
236/*
237 * Software allocation of To SB Mailbox resources
238 */
239
240/* tosbmailbox bits corresponding to intstatus bits */
241#define SMB_NAK		(1 << 0)	/* Frame NAK */
242#define SMB_INT_ACK	(1 << 1)	/* Host Interrupt ACK */
243#define SMB_USE_OOB	(1 << 2)	/* Use OOB Wakeup */
244#define SMB_DEV_INT	(1 << 3)	/* Miscellaneous Interrupt */
245
246/* tosbmailboxdata */
247#define SMB_DATA_VERSION_SHIFT	16	/* host protocol version */
248
249/*
250 * Software allocation of To Host Mailbox resources
251 */
252
253/* intstatus bits */
254#define I_HMB_FC_STATE	I_HMB_SW0	/* Flow Control State */
255#define I_HMB_FC_CHANGE	I_HMB_SW1	/* Flow Control State Changed */
256#define I_HMB_FRAME_IND	I_HMB_SW2	/* Frame Indication */
257#define I_HMB_HOST_INT	I_HMB_SW3	/* Miscellaneous Interrupt */
258
259/* tohostmailboxdata */
260#define HMB_DATA_NAKHANDLED	1	/* retransmit NAK'd frame */
261#define HMB_DATA_DEVREADY	2	/* talk to host after enable */
262#define HMB_DATA_FC		4	/* per prio flowcontrol update flag */
263#define HMB_DATA_FWREADY	8	/* fw ready for protocol activity */
264
265#define HMB_DATA_FCDATA_MASK	0xff000000
266#define HMB_DATA_FCDATA_SHIFT	24
267
268#define HMB_DATA_VERSION_MASK	0x00ff0000
269#define HMB_DATA_VERSION_SHIFT	16
270
271/*
272 * Software-defined protocol header
273 */
274
275/* Current protocol version */
276#define SDPCM_PROT_VERSION	4
277
278/*
279 * Shared structure between dongle and the host.
280 * The structure contains pointers to trap or assert information.
281 */
282#define SDPCM_SHARED_VERSION       0x0003
283#define SDPCM_SHARED_VERSION_MASK  0x00FF
284#define SDPCM_SHARED_ASSERT_BUILT  0x0100
285#define SDPCM_SHARED_ASSERT        0x0200
286#define SDPCM_SHARED_TRAP          0x0400
287
288/* Space for header read, limit for data packets */
289#define MAX_HDR_READ	(1 << 6)
290#define MAX_RX_DATASZ	2048
291
292/* Bump up limit on waiting for HT to account for first startup;
293 * if the image is doing a CRC calculation before programming the PMU
294 * for HT availability, it could take a couple hundred ms more, so
295 * max out at 1 second (1000000us).
296 */
297#undef PMU_MAX_TRANSITION_DLY
298#define PMU_MAX_TRANSITION_DLY 1000000
299
300/* Value for ChipClockCSR during initial setup */
301#define BRCMF_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF |	\
302					SBSDIO_ALP_AVAIL_REQ)
303
304/* Flags for SDH calls */
305#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
306
307#define BRCMF_IDLE_IMMEDIATE	(-1)	/* Enter idle immediately */
308#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
309					 * when idle
310					 */
311#define BRCMF_IDLE_INTERVAL	1
312
313#define KSO_WAIT_US 50
314#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
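
/*
 * Editor's note: with PMU_MAX_TRANSITION_DLY at 1000000 us and
 * KSO_WAIT_US at 50 us, MAX_KSO_ATTEMPTS works out to 20000 polls,
 * i.e. the KSO handshake in brcmf_sdio_kso_control() retries for up
 * to roughly one second.
 */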
315
316/*
317 * Conversion of 802.1D priority to precedence level
318 */
319static uint prio2prec(u32 prio)
320{
321	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
322	       (prio^2) : prio;
323}
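
/*
 * Editor's note: an illustrative mapping (based on the PRIO_8021D_*
 * values in brcmu_wifi.h, where NONE is 0 and BE is 2). The XOR above
 * only fires for those two priorities and simply swaps them, so Best
 * Effort traffic ends up above priorities 1 and 2 in the precedence
 * order while every other priority maps to itself:
 *
 *	prio2prec(0) == 2	prio2prec(2) == 0
 *	prio2prec(1) == 1	prio2prec(3)..prio2prec(7) == 3..7
 */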
324
325#ifdef DEBUG
326/* Device console log buffer state */
327struct brcmf_console {
328	uint count;		/* Poll interval msec counter */
329	uint log_addr;		/* Log struct address (fixed) */
330	struct rte_log_le log_le;	/* Log struct (host copy) */
331	uint bufsize;		/* Size of log buffer */
332	u8 *buf;		/* Log buffer (host copy) */
333	uint last;		/* Last buffer read index */
334};
335
336struct brcmf_trap_info {
337	__le32		type;
338	__le32		epc;
339	__le32		cpsr;
340	__le32		spsr;
341	__le32		r0;	/* a1 */
342	__le32		r1;	/* a2 */
343	__le32		r2;	/* a3 */
344	__le32		r3;	/* a4 */
345	__le32		r4;	/* v1 */
346	__le32		r5;	/* v2 */
347	__le32		r6;	/* v3 */
348	__le32		r7;	/* v4 */
349	__le32		r8;	/* v5 */
350	__le32		r9;	/* sb/v6 */
351	__le32		r10;	/* sl/v7 */
352	__le32		r11;	/* fp/v8 */
353	__le32		r12;	/* ip */
354	__le32		r13;	/* sp */
355	__le32		r14;	/* lr */
356	__le32		pc;	/* r15 */
357};
358#endif				/* DEBUG */
359
360struct sdpcm_shared {
361	u32 flags;
362	u32 trap_addr;
363	u32 assert_exp_addr;
364	u32 assert_file_addr;
365	u32 assert_line;
366	u32 console_addr;	/* Address of struct rte_console */
367	u32 msgtrace_addr;
368	u8 tag[32];
369	u32 brpt_addr;
370};
371
372struct sdpcm_shared_le {
373	__le32 flags;
374	__le32 trap_addr;
375	__le32 assert_exp_addr;
376	__le32 assert_file_addr;
377	__le32 assert_line;
378	__le32 console_addr;	/* Address of struct rte_console */
379	__le32 msgtrace_addr;
380	u8 tag[32];
381	__le32 brpt_addr;
382};
383
384/* dongle SDIO bus specific header info */
385struct brcmf_sdio_hdrinfo {
386	u8 seq_num;
387	u8 channel;
388	u16 len;
389	u16 len_left;
390	u16 len_nxtfrm;
391	u8 dat_offset;
392	bool lastfrm;
393	u16 tail_pad;
394};
395
396/* misc chip info needed by some of the routines */
397/* Private data for SDIO bus interaction */
398struct brcmf_sdio {
399	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
400	struct brcmf_chip *ci;	/* Chip info struct */
401
402	u32 ramsize;		/* Size of RAM in SOCRAM (bytes) */
403
404	u32 hostintmask;	/* Copy of Host Interrupt Mask */
405	atomic_t intstatus;	/* Intstatus bits (events) pending */
406	atomic_t fcstate;	/* State of dongle flow-control */
407
408	uint blocksize;		/* Block size of SDIO transfers */
409	uint roundup;		/* Max roundup limit */
410
411	struct pktq txq;	/* Queue length used for flow-control */
412	u8 flowcontrol;	/* per prio flow control bitmask */
413	u8 tx_seq;		/* Transmit sequence number (next) */
414	u8 tx_max;		/* Maximum transmit sequence allowed */
415
416	u8 *hdrbuf;		/* buffer for handling rx frame */
417	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
418	u8 rx_seq;		/* Receive sequence number (expected) */
419	struct brcmf_sdio_hdrinfo cur_read;
420				/* info of current read frame */
421	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
422	bool rxpending;		/* Data frame pending in dongle */
423
424	uint rxbound;		/* Rx frames to read before resched */
425	uint txbound;		/* Tx frames to send before resched */
426	uint txminmax;
427
428	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
429	struct sk_buff_head glom; /* Packet list for glommed superframe */
430	uint glomerr;		/* Glom packet read errors */
431
432	u8 *rxbuf;		/* Buffer for receiving control packets */
433	uint rxblen;		/* Allocated length of rxbuf */
434	u8 *rxctl;		/* Aligned pointer into rxbuf */
435	u8 *rxctl_orig;		/* pointer for freeing rxctl */
436	uint rxlen;		/* Length of valid data in buffer */
437	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */
438
439	u8 sdpcm_ver;	/* Bus protocol reported by dongle */
440
441	bool intr;		/* Use interrupts */
442	bool poll;		/* Use polling */
443	atomic_t ipend;		/* Device interrupt is pending */
444	uint spurious;		/* Count of spurious interrupts */
445	uint pollrate;		/* Ticks between device polls */
446	uint polltick;		/* Tick counter */
447
448#ifdef DEBUG
449	uint console_interval;
450	struct brcmf_console console;	/* Console output polling support */
451	uint console_addr;	/* Console address from shared struct */
452#endif				/* DEBUG */
453
454	uint clkstate;		/* State of sd and backplane clock(s) */
455	bool activity;		/* Activity flag for clock down */
456	s32 idletime;		/* Control for activity timeout */
457	s32 idlecount;	/* Activity timeout counter */
458	s32 idleclock;	/* How to set bus driver when idle */
459	bool rxflow_mode;	/* Rx flow control mode */
460	bool rxflow;		/* Is rx flow control on */
461	bool alp_only;		/* Don't use HT clock (ALP only) */
462
463	u8 *ctrl_frame_buf;
464	u32 ctrl_frame_len;
465	bool ctrl_frame_stat;
466
467	spinlock_t txqlock;
468	wait_queue_head_t ctrl_wait;
469	wait_queue_head_t dcmd_resp_wait;
470
471	struct timer_list timer;
472	struct completion watchdog_wait;
473	struct task_struct *watchdog_tsk;
474	bool wd_timer_valid;
475	uint save_ms;
476
477	struct workqueue_struct *brcmf_wq;
478	struct work_struct datawork;
479	atomic_t dpc_tskcnt;
480
481	bool txoff;		/* Transmit flow-controlled */
482	struct brcmf_sdio_count sdcnt;
483	bool sr_enabled; /* SaveRestore enabled */
484	bool sleeping; /* SDIO bus sleeping */
485
486	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
487	bool txglom;		/* host tx glomming enable flag */
488	struct sk_buff *txglom_sgpad;	/* scatter-gather padding buffer */
489	u16 head_align;		/* buffer pointer alignment */
490	u16 sgentry_align;	/* scatter-gather buffer alignment */
491};
492
493/* clkstate */
494#define CLK_NONE	0
495#define CLK_SDONLY	1
496#define CLK_PENDING	2
497#define CLK_AVAIL	3
498
499#ifdef DEBUG
500static int qcount[NUMPRIO];
501#endif				/* DEBUG */
502
503#define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */
504
505#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
506
507/* Retry count for register access failures */
508static const uint retry_limit = 2;
509
510/* Limit on rounding up frames */
511static const uint max_roundup = 512;
512
513#define ALIGNMENT  4
514
515static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
516module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
517MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
518
519enum brcmf_sdio_frmtype {
520	BRCMF_SDIO_FT_NORMAL,
521	BRCMF_SDIO_FT_SUPER,
522	BRCMF_SDIO_FT_SUB,
523};
524
525#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
526
527/* SDIO Pad drive strength to select value mappings */
528struct sdiod_drive_str {
529	u8 strength;	/* Pad Drive Strength in mA */
530	u8 sel;		/* Chip-specific select value */
531};
532
533/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
534static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
535	{32, 0x6},
536	{26, 0x7},
537	{22, 0x4},
538	{16, 0x5},
539	{12, 0x2},
540	{8, 0x3},
541	{4, 0x0},
542	{0, 0x1}
543};
544
545/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
546static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
547	{6, 0x7},
548	{5, 0x6},
549	{4, 0x5},
550	{3, 0x4},
551	{2, 0x2},
552	{1, 0x1},
553	{0, 0x0}
554};
555
556/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
557static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
558	{3, 0x3},
559	{2, 0x2},
560	{1, 0x1},
561	{0, 0x0} };
562
563/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
564static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
565	{16, 0x7},
566	{12, 0x5},
567	{8,  0x3},
568	{4,  0x1}
569};
570
571#define BCM43143_FIRMWARE_NAME		"brcm/brcmfmac43143-sdio.bin"
572#define BCM43143_NVRAM_NAME		"brcm/brcmfmac43143-sdio.txt"
573#define BCM43241B0_FIRMWARE_NAME	"brcm/brcmfmac43241b0-sdio.bin"
574#define BCM43241B0_NVRAM_NAME		"brcm/brcmfmac43241b0-sdio.txt"
575#define BCM43241B4_FIRMWARE_NAME	"brcm/brcmfmac43241b4-sdio.bin"
576#define BCM43241B4_NVRAM_NAME		"brcm/brcmfmac43241b4-sdio.txt"
577#define BCM4329_FIRMWARE_NAME		"brcm/brcmfmac4329-sdio.bin"
578#define BCM4329_NVRAM_NAME		"brcm/brcmfmac4329-sdio.txt"
579#define BCM4330_FIRMWARE_NAME		"brcm/brcmfmac4330-sdio.bin"
580#define BCM4330_NVRAM_NAME		"brcm/brcmfmac4330-sdio.txt"
581#define BCM4334_FIRMWARE_NAME		"brcm/brcmfmac4334-sdio.bin"
582#define BCM4334_NVRAM_NAME		"brcm/brcmfmac4334-sdio.txt"
583#define BCM4335_FIRMWARE_NAME		"brcm/brcmfmac4335-sdio.bin"
584#define BCM4335_NVRAM_NAME		"brcm/brcmfmac4335-sdio.txt"
585#define BCM43362_FIRMWARE_NAME		"brcm/brcmfmac43362-sdio.bin"
586#define BCM43362_NVRAM_NAME		"brcm/brcmfmac43362-sdio.txt"
587#define BCM4339_FIRMWARE_NAME		"brcm/brcmfmac4339-sdio.bin"
588#define BCM4339_NVRAM_NAME		"brcm/brcmfmac4339-sdio.txt"
589
590MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
591MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
592MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
593MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
594MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
595MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
596MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
597MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
598MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
599MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
600MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
601MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
602MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
603MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
604MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
605MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
606MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
607MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
608
609struct brcmf_firmware_names {
610	u32 chipid;
611	u32 revmsk;
612	const char *bin;
613	const char *nv;
614};
615
616enum brcmf_firmware_type {
617	BRCMF_FIRMWARE_BIN,
618	BRCMF_FIRMWARE_NVRAM
619};
620
621#define BRCMF_FIRMWARE_NVRAM(name) \
622	name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
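
/*
 * Editor's note: the macro above pastes the chip name into the firmware
 * and nvram #defines declared earlier, so a table entry such as
 *
 *	BRCMF_FIRMWARE_NVRAM(BCM4329)
 *
 * expands to
 *
 *	BCM4329_FIRMWARE_NAME, BCM4329_NVRAM_NAME
 *
 * i.e. "brcm/brcmfmac4329-sdio.bin", "brcm/brcmfmac4329-sdio.txt".
 */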
623
624static const struct brcmf_firmware_names brcmf_fwname_data[] = {
625	{ BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
626	{ BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
627	{ BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
628	{ BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
629	{ BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
630	{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
631	{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
632	{ BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
633	{ BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
634};
635
636
637static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
638						  enum brcmf_firmware_type type)
639{
640	const struct firmware *fw;
641	const char *name;
642	int err, i;
643
644	for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
645		if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
646		    brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
647			switch (type) {
648			case BRCMF_FIRMWARE_BIN:
649				name = brcmf_fwname_data[i].bin;
650				break;
651			case BRCMF_FIRMWARE_NVRAM:
652				name = brcmf_fwname_data[i].nv;
653				break;
654			default:
655				brcmf_err("invalid firmware type (%d)\n", type);
656				return NULL;
657			}
658			goto found;
659		}
660	}
661	brcmf_err("Unknown chipid %d [%d]\n",
662		  bus->ci->chip, bus->ci->chiprev);
663	return NULL;
664
665found:
666	err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
667	if ((err) || (!fw)) {
668		brcmf_err("fail to request firmware %s (%d)\n", name, err);
669		return NULL;
670	}
671
672	return fw;
673}
674
675static void pkt_align(struct sk_buff *p, int len, int align)
676{
677	uint datalign;
678	datalign = (unsigned long)(p->data);
679	datalign = roundup(datalign, (align)) - datalign;
680	if (datalign)
681		skb_pull(p, datalign);
682	__skb_trim(p, len);
683}
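
/*
 * Editor's note: pkt_align() above advances p->data to the next 'align'
 * boundary and then trims the packet to 'len'. With a (hypothetical)
 * data pointer ending in ...06 and align == 4, datalign evaluates to 2,
 * so skb_pull() drops two bytes and the payload starts at ...08.
 */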
684
685/* Check whether the dongle has offered any tx window space */
686static bool data_ok(struct brcmf_sdio *bus)
687{
688	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
689	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
690}
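
/*
 * Editor's note: a sketch of the window arithmetic in data_ok().
 * tx_max is the transmit window limit reported by the firmware and
 * tx_seq is the next sequence number the host will use; both wrap at
 * 256, so the difference is taken in u8. A window is open when the
 * difference is non-zero and bit 7 is clear (a set bit 7 would mean
 * tx_seq has run past tx_max):
 *
 *	tx_max = 0x05, tx_seq = 0x03  ->  diff 0x02, room for 2 frames
 *	tx_max = 0x03, tx_seq = 0x03  ->  diff 0x00, window closed
 *	tx_max = 0x01, tx_seq = 0xfe  ->  diff 0x03, window spans the wrap
 */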
691
692/*
693 * Reads a register in the SDIO hardware block. This block occupies a series of
694 * addresses on the 32-bit backplane bus.
695 */
696static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
697{
698	struct brcmf_core *core;
699	int ret;
700
701	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
702	*regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
703
704	return ret;
705}
706
707static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
708{
709	struct brcmf_core *core;
710	int ret;
711
712	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
713	brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
714
715	return ret;
716}
717
718static int
719brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
720{
721	u8 wr_val = 0, rd_val, cmp_val, bmask;
722	int err = 0;
723	int try_cnt = 0;
724
725	brcmf_dbg(TRACE, "Enter\n");
726
727	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
728	/* 1st KSO write goes to the AOS wake-up core if the device is asleep */
729	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
730			  wr_val, &err);
731	if (err) {
732		brcmf_err("SDIO_AOS KSO write error: %d\n", err);
733		return err;
734	}
735
736	if (on) {
737		/* device WAKEUP through KSO:
738		 * write bit 0 & read back until
739		 * both bits 0 (kso bit) & 1 (dev on status) are set
740		 */
741		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
742			  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
743		bmask = cmp_val;
744		usleep_range(2000, 3000);
745	} else {
746		/* Put device to sleep, turn off KSO */
747		cmp_val = 0;
748		/* only check for bit0, bit1(dev on status) may not
749		 * get cleared right away
750		 */
751		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
752	}
753
754	do {
755		/* reliable KSO bit set/clr:
756		 * the sdiod sleep write access is synced to the PMU 32 kHz clock,
757		 * so a single write attempt may fail;
758		 * read it back until it matches the written value
759		 */
760		rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
761					   &err);
762		if (((rd_val & bmask) == cmp_val) && !err)
763			break;
764		brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
765			  try_cnt, MAX_KSO_ATTEMPTS, err);
766		udelay(KSO_WAIT_US);
767		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
768				  wr_val, &err);
769	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
770
771	return err;
772}
773
774#define PKT_AVAILABLE()		(intstatus & I_HMB_FRAME_IND)
775
776#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
777
778/* Turn backplane clock on or off */
779static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
780{
781	int err;
782	u8 clkctl, clkreq, devctl;
783	unsigned long timeout;
784
785	brcmf_dbg(SDIO, "Enter\n");
786
787	clkctl = 0;
788
789	if (bus->sr_enabled) {
790		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
791		return 0;
792	}
793
794	if (on) {
795		/* Request HT Avail */
796		clkreq =
797		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
798
799		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
800				  clkreq, &err);
801		if (err) {
802			brcmf_err("HT Avail request error: %d\n", err);
803			return -EBADE;
804		}
805
806		/* Check current status */
807		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
808					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
809		if (err) {
810			brcmf_err("HT Avail read error: %d\n", err);
811			return -EBADE;
812		}
813
814		/* Go to pending and await interrupt if appropriate */
815		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
816			/* Allow only clock-available interrupt */
817			devctl = brcmf_sdiod_regrb(bus->sdiodev,
818						   SBSDIO_DEVICE_CTL, &err);
819			if (err) {
820				brcmf_err("Devctl error setting CA: %d\n",
821					  err);
822				return -EBADE;
823			}
824
825			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
826			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
827					  devctl, &err);
828			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
829			bus->clkstate = CLK_PENDING;
830
831			return 0;
832		} else if (bus->clkstate == CLK_PENDING) {
833			/* Cancel CA-only interrupt filter */
834			devctl = brcmf_sdiod_regrb(bus->sdiodev,
835						   SBSDIO_DEVICE_CTL, &err);
836			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
837			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
838					  devctl, &err);
839		}
840
841		/* Otherwise, wait here (polling) for HT Avail */
842		timeout = jiffies +
843			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
844		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
845			clkctl = brcmf_sdiod_regrb(bus->sdiodev,
846						   SBSDIO_FUNC1_CHIPCLKCSR,
847						   &err);
848			if (time_after(jiffies, timeout))
849				break;
850			else
851				usleep_range(5000, 10000);
852		}
853		if (err) {
854			brcmf_err("HT Avail request error: %d\n", err);
855			return -EBADE;
856		}
857		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
858			brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
859				  PMU_MAX_TRANSITION_DLY, clkctl);
860			return -EBADE;
861		}
862
863		/* Mark clock available */
864		bus->clkstate = CLK_AVAIL;
865		brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
866
867#if defined(DEBUG)
868		if (!bus->alp_only) {
869			if (SBSDIO_ALPONLY(clkctl))
870				brcmf_err("HT Clock should be on\n");
871		}
872#endif				/* defined (DEBUG) */
873
874		bus->activity = true;
875	} else {
876		clkreq = 0;
877
878		if (bus->clkstate == CLK_PENDING) {
879			/* Cancel CA-only interrupt filter */
880			devctl = brcmf_sdiod_regrb(bus->sdiodev,
881						   SBSDIO_DEVICE_CTL, &err);
882			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
883			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
884					  devctl, &err);
885		}
886
887		bus->clkstate = CLK_SDONLY;
888		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
889				  clkreq, &err);
890		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
891		if (err) {
892			brcmf_err("Failed access turning clock off: %d\n",
893				  err);
894			return -EBADE;
895		}
896	}
897	return 0;
898}
899
900/* Change idle/active SD state */
901static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
902{
903	brcmf_dbg(SDIO, "Enter\n");
904
905	if (on)
906		bus->clkstate = CLK_SDONLY;
907	else
908		bus->clkstate = CLK_NONE;
909
910	return 0;
911}
912
913/* Transition SD and backplane clock readiness */
914static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
915{
916#ifdef DEBUG
917	uint oldstate = bus->clkstate;
918#endif				/* DEBUG */
919
920	brcmf_dbg(SDIO, "Enter\n");
921
922	/* Early exit if we're already there */
923	if (bus->clkstate == target) {
924		if (target == CLK_AVAIL) {
925			brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
926			bus->activity = true;
927		}
928		return 0;
929	}
930
931	switch (target) {
932	case CLK_AVAIL:
933		/* Make sure SD clock is available */
934		if (bus->clkstate == CLK_NONE)
935			brcmf_sdio_sdclk(bus, true);
936		/* Now request HT Avail on the backplane */
937		brcmf_sdio_htclk(bus, true, pendok);
938		brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
939		bus->activity = true;
940		break;
941
942	case CLK_SDONLY:
943		/* Remove HT request, or bring up SD clock */
944		if (bus->clkstate == CLK_NONE)
945			brcmf_sdio_sdclk(bus, true);
946		else if (bus->clkstate == CLK_AVAIL)
947			brcmf_sdio_htclk(bus, false, false);
948		else
949			brcmf_err("request for %d -> %d\n",
950				  bus->clkstate, target);
951		brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
952		break;
953
954	case CLK_NONE:
955		/* Make sure to remove HT request */
956		if (bus->clkstate == CLK_AVAIL)
957			brcmf_sdio_htclk(bus, false, false);
958		/* Now remove the SD clock */
959		brcmf_sdio_sdclk(bus, false);
960		brcmf_sdio_wd_timer(bus, 0);
961		break;
962	}
963#ifdef DEBUG
964	brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
965#endif				/* DEBUG */
966
967	return 0;
968}
969
970static int
971brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
972{
973	int err = 0;
974
975	brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
976		  (sleep ? "SLEEP" : "WAKE"),
977		  (bus->sleeping ? "SLEEP" : "WAKE"));
978
979	/* If SR is enabled control bus state with KSO */
980	if (bus->sr_enabled) {
981		/* Done if we're already in the requested state */
982		if (sleep == bus->sleeping)
983			goto end;
984
985		/* Going to sleep */
986		if (sleep) {
987			/* Don't sleep if something is pending */
988			if (atomic_read(&bus->intstatus) ||
989			    atomic_read(&bus->ipend) > 0 ||
990			    (!atomic_read(&bus->fcstate) &&
991			    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
992			    data_ok(bus)))
993				 return -EBUSY;
994			err = brcmf_sdio_kso_control(bus, false);
995			/* disable watchdog */
996			if (!err)
997				brcmf_sdio_wd_timer(bus, 0);
998		} else {
999			bus->idlecount = 0;
1000			err = brcmf_sdio_kso_control(bus, true);
1001		}
1002		if (!err) {
1003			/* Change state */
1004			bus->sleeping = sleep;
1005			brcmf_dbg(SDIO, "new state %s\n",
1006				  (sleep ? "SLEEP" : "WAKE"));
1007		} else {
1008			brcmf_err("error while changing bus sleep state %d\n",
1009				  err);
1010			return err;
1011		}
1012	}
1013
1014end:
1015	/* control clocks */
1016	if (sleep) {
1017		if (!bus->sr_enabled)
1018			brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
1019	} else {
1020		brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
1021	}
1022
1023	return err;
1024
1025}
1026
1027#ifdef DEBUG
1028static inline bool brcmf_sdio_valid_shared_address(u32 addr)
1029{
1030	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
1031}
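
/*
 * Editor's note: a sketch of the intent here. Before the firmware is
 * up, the last word of RAM is expected to hold the nvram-length token
 * written during download, whose upper 16 bits are the bitwise
 * complement of its lower 16 bits. The check above therefore rejects 0
 * and any value still showing that complementary pattern; e.g.
 * 0xfffc0003 is rejected, while a (hypothetical) shared-structure
 * address such as 0x0007fb2c is accepted.
 */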
1032
1033static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
1034				 struct sdpcm_shared *sh)
1035{
1036	u32 addr;
1037	int rv;
1038	u32 shaddr = 0;
1039	struct sdpcm_shared_le sh_le;
1040	__le32 addr_le;
1041
1042	shaddr = bus->ci->rambase + bus->ramsize - 4;
1043
1044	/*
1045	 * Read last word in socram to determine
1046	 * address of sdpcm_shared structure
1047	 */
1048	sdio_claim_host(bus->sdiodev->func[1]);
1049	brcmf_sdio_bus_sleep(bus, false, false);
1050	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
1051	sdio_release_host(bus->sdiodev->func[1]);
1052	if (rv < 0)
1053		return rv;
1054
1055	addr = le32_to_cpu(addr_le);
1056
1057	brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
1058
1059	/*
1060	 * Check if addr is valid.
1061	 * NVRAM length at the end of memory should have been overwritten.
1062	 */
1063	if (!brcmf_sdio_valid_shared_address(addr)) {
1064			brcmf_err("invalid sdpcm_shared address 0x%08X\n",
1065				  addr);
1066			return -EINVAL;
1067	}
1068
1069	/* Read hndrte_shared structure */
1070	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
1071			       sizeof(struct sdpcm_shared_le));
1072	if (rv < 0)
1073		return rv;
1074
1075	/* Endianness */
1076	sh->flags = le32_to_cpu(sh_le.flags);
1077	sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
1078	sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
1079	sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
1080	sh->assert_line = le32_to_cpu(sh_le.assert_line);
1081	sh->console_addr = le32_to_cpu(sh_le.console_addr);
1082	sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
1083
1084	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
1085		brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
1086			  SDPCM_SHARED_VERSION,
1087			  sh->flags & SDPCM_SHARED_VERSION_MASK);
1088		return -EPROTO;
1089	}
1090
1091	return 0;
1092}
1093
1094static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1095{
1096	struct sdpcm_shared sh;
1097
1098	if (brcmf_sdio_readshared(bus, &sh) == 0)
1099		bus->console_addr = sh.console_addr;
1100}
1101#else
1102static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1103{
1104}
1105#endif /* DEBUG */
1106
1107static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
1108{
1109	u32 intstatus = 0;
1110	u32 hmb_data;
1111	u8 fcbits;
1112	int ret;
1113
1114	brcmf_dbg(SDIO, "Enter\n");
1115
1116	/* Read mailbox data and ack that we did so */
1117	ret = r_sdreg32(bus, &hmb_data,
1118			offsetof(struct sdpcmd_regs, tohostmailboxdata));
1119
1120	if (ret == 0)
1121		w_sdreg32(bus, SMB_INT_ACK,
1122			  offsetof(struct sdpcmd_regs, tosbmailbox));
1123	bus->sdcnt.f1regdata += 2;
1124
1125	/* Dongle recomposed rx frames, accept them again */
1126	if (hmb_data & HMB_DATA_NAKHANDLED) {
1127		brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
1128			  bus->rx_seq);
1129		if (!bus->rxskip)
1130			brcmf_err("unexpected NAKHANDLED!\n");
1131
1132		bus->rxskip = false;
1133		intstatus |= I_HMB_FRAME_IND;
1134	}
1135
1136	/*
1137	 * DEVREADY does not occur with gSPI.
1138	 */
1139	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
1140		bus->sdpcm_ver =
1141		    (hmb_data & HMB_DATA_VERSION_MASK) >>
1142		    HMB_DATA_VERSION_SHIFT;
1143		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
1144			brcmf_err("Version mismatch, dongle reports %d, "
1145				  "expecting %d\n",
1146				  bus->sdpcm_ver, SDPCM_PROT_VERSION);
1147		else
1148			brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
1149				  bus->sdpcm_ver);
1150
1151		/*
1152		 * Retrieve console state address now that firmware should have
1153		 * updated it.
1154		 */
1155		brcmf_sdio_get_console_addr(bus);
1156	}
1157
1158	/*
1159	 * Flow Control has been moved into the RX headers and this out of band
1160	 * method isn't used any more.
1161	 * It remains here for backward compatibility with older dongles.
1162	 */
1163	if (hmb_data & HMB_DATA_FC) {
1164		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
1165							HMB_DATA_FCDATA_SHIFT;
1166
1167		if (fcbits & ~bus->flowcontrol)
1168			bus->sdcnt.fc_xoff++;
1169
1170		if (bus->flowcontrol & ~fcbits)
1171			bus->sdcnt.fc_xon++;
1172
1173		bus->sdcnt.fc_rcvd++;
1174		bus->flowcontrol = fcbits;
1175	}
1176
1177	/* Shouldn't be any others */
1178	if (hmb_data & ~(HMB_DATA_DEVREADY |
1179			 HMB_DATA_NAKHANDLED |
1180			 HMB_DATA_FC |
1181			 HMB_DATA_FWREADY |
1182			 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
1183		brcmf_err("Unknown mailbox data content: 0x%02x\n",
1184			  hmb_data);
1185
1186	return intstatus;
1187}
1188
1189static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1190{
1191	uint retries = 0;
1192	u16 lastrbc;
1193	u8 hi, lo;
1194	int err;
1195
1196	brcmf_err("%sterminate frame%s\n",
1197		  abort ? "abort command, " : "",
1198		  rtx ? ", send NAK" : "");
1199
1200	if (abort)
1201		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
1202
1203	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1204			  SFC_RF_TERM, &err);
1205	bus->sdcnt.f1regdata++;
1206
1207	/* Wait until the packet has been flushed (device/FIFO stable) */
1208	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
1209		hi = brcmf_sdiod_regrb(bus->sdiodev,
1210				       SBSDIO_FUNC1_RFRAMEBCHI, &err);
1211		lo = brcmf_sdiod_regrb(bus->sdiodev,
1212				       SBSDIO_FUNC1_RFRAMEBCLO, &err);
1213		bus->sdcnt.f1regdata += 2;
1214
1215		if ((hi == 0) && (lo == 0))
1216			break;
1217
1218		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
1219			brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1220				  lastrbc, (hi << 8) + lo);
1221		}
1222		lastrbc = (hi << 8) + lo;
1223	}
1224
1225	if (!retries)
1226		brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
1227	else
1228		brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
1229
1230	if (rtx) {
1231		bus->sdcnt.rxrtx++;
1232		err = w_sdreg32(bus, SMB_NAK,
1233				offsetof(struct sdpcmd_regs, tosbmailbox));
1234
1235		bus->sdcnt.f1regdata++;
1236		if (err == 0)
1237			bus->rxskip = true;
1238	}
1239
1240	/* Clear partial in any case */
1241	bus->cur_read.len = 0;
1242}
1243
1244/* return total length of buffer chain */
1245static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
1246{
1247	struct sk_buff *p;
1248	uint total;
1249
1250	total = 0;
1251	skb_queue_walk(&bus->glom, p)
1252		total += p->len;
1253	return total;
1254}
1255
1256static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
1257{
1258	struct sk_buff *cur, *next;
1259
1260	skb_queue_walk_safe(&bus->glom, cur, next) {
1261		skb_unlink(cur, &bus->glom);
1262		brcmu_pkt_buf_free_skb(cur);
1263	}
1264}
1265
1266/**
1267 * brcmfmac sdio bus specific header
1268 * This is the lowest layer header wrapped on the packets transmitted between
1269 * host and WiFi dongle which contains information needed for SDIO core and
1270 * firmware
1271 *
1272 * It consists of 3 parts: hardware header, hardware extension header and
1273 * software header
1274 * hardware header (frame tag) - 4 bytes
1275 * Byte 0~1: Frame length
1276 * Byte 2~3: Checksum, bit-wise inverse of frame length
1277 * hardware extension header - 8 bytes
1278 * Tx glom mode only, N/A for Rx or normal Tx
1279 * Byte 0~1: Packet length excluding hw frame tag
1280 * Byte 2: Reserved
1281 * Byte 3: Frame flags, bit 0: last frame indication
1282 * Byte 4~5: Reserved
1283 * Byte 6~7: Tail padding length
1284 * software header - 8 bytes
1285 * Byte 0: Rx/Tx sequence number
1286 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1287 * Byte 2: Length of next data frame, reserved for Tx
1288 * Byte 3: Data offset
1289 * Byte 4: Flow control bits, reserved for Tx
1290 * Byte 5: Maximum Sequence number allowed by firmware for Tx, N/A for Tx packet
1291 * Byte 6~7: Reserved
1292 */
1293#define SDPCM_HWHDR_LEN			4
1294#define SDPCM_HWEXT_LEN			8
1295#define SDPCM_SWHDR_LEN			8
1296#define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
1297/* software header */
1298#define SDPCM_SEQ_MASK			0x000000ff
1299#define SDPCM_SEQ_WRAP			256
1300#define SDPCM_CHANNEL_MASK		0x00000f00
1301#define SDPCM_CHANNEL_SHIFT		8
1302#define SDPCM_CONTROL_CHANNEL		0	/* Control */
1303#define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication */
1304#define SDPCM_DATA_CHANNEL		2	/* Data Xmit/Recv */
1305#define SDPCM_GLOM_CHANNEL		3	/* Coalesced packets */
1306#define SDPCM_TEST_CHANNEL		15	/* Test/debug packets */
1307#define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
1308#define SDPCM_NEXTLEN_MASK		0x00ff0000
1309#define SDPCM_NEXTLEN_SHIFT		16
1310#define SDPCM_DOFFSET_MASK		0xff000000
1311#define SDPCM_DOFFSET_SHIFT		24
1312#define SDPCM_FCMASK_MASK		0x000000ff
1313#define SDPCM_WINDOW_MASK		0x0000ff00
1314#define SDPCM_WINDOW_SHIFT		8
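
/*
 * Editor's note: a minimal sketch, mirroring brcmf_sdio_hdparse() below,
 * of how the first software-header word is unpacked with the masks
 * above; swheader is the little-endian u32 that follows the 4-byte
 * hardware header, and the next-length field is in 16-byte units (the
 * driver later shifts it left by 4):
 *
 *	u32 swheader = le32_to_cpu(*(__le32 *)(header + SDPCM_HWHDR_LEN));
 *	u8 seq = swheader & SDPCM_SEQ_MASK;
 *	u8 channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
 *	u16 nextlen = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
 *	u8 doff = (swheader & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
 */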
1315
1316static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
1317{
1318	u32 hdrvalue;
1319	hdrvalue = *(u32 *)swheader;
1320	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
1321}
1322
1323static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1324			      struct brcmf_sdio_hdrinfo *rd,
1325			      enum brcmf_sdio_frmtype type)
1326{
1327	u16 len, checksum;
1328	u8 rx_seq, fc, tx_seq_max;
1329	u32 swheader;
1330
1331	trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
1332
1333	/* hw header */
1334	len = get_unaligned_le16(header);
1335	checksum = get_unaligned_le16(header + sizeof(u16));
1336	/* All zero means no more to read */
1337	if (!(len | checksum)) {
1338		bus->rxpending = false;
1339		return -ENODATA;
1340	}
1341	if ((u16)(~(len ^ checksum))) {
1342		brcmf_err("HW header checksum error\n");
1343		bus->sdcnt.rx_badhdr++;
1344		brcmf_sdio_rxfail(bus, false, false);
1345		return -EIO;
1346	}
1347	if (len < SDPCM_HDRLEN) {
1348		brcmf_err("HW header length error\n");
1349		return -EPROTO;
1350	}
1351	if (type == BRCMF_SDIO_FT_SUPER &&
1352	    (roundup(len, bus->blocksize) != rd->len)) {
1353		brcmf_err("HW superframe header length error\n");
1354		return -EPROTO;
1355	}
1356	if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1357		brcmf_err("HW subframe header length error\n");
1358		return -EPROTO;
1359	}
1360	rd->len = len;
1361
1362	/* software header */
1363	header += SDPCM_HWHDR_LEN;
1364	swheader = le32_to_cpu(*(__le32 *)header);
1365	if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
1366		brcmf_err("Glom descriptor found in superframe head\n");
1367		rd->len = 0;
1368		return -EINVAL;
1369	}
1370	rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
1371	rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
1372	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1373	    type != BRCMF_SDIO_FT_SUPER) {
1374		brcmf_err("HW header length too long\n");
1375		bus->sdcnt.rx_toolong++;
1376		brcmf_sdio_rxfail(bus, false, false);
1377		rd->len = 0;
1378		return -EPROTO;
1379	}
1380	if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1381		brcmf_err("Wrong channel for superframe\n");
1382		rd->len = 0;
1383		return -EINVAL;
1384	}
1385	if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1386	    rd->channel != SDPCM_EVENT_CHANNEL) {
1387		brcmf_err("Wrong channel for subframe\n");
1388		rd->len = 0;
1389		return -EINVAL;
1390	}
1391	rd->dat_offset = brcmf_sdio_getdatoffset(header);
1392	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1393		brcmf_err("seq %d: bad data offset\n", rx_seq);
1394		bus->sdcnt.rx_badhdr++;
1395		brcmf_sdio_rxfail(bus, false, false);
1396		rd->len = 0;
1397		return -ENXIO;
1398	}
1399	if (rd->seq_num != rx_seq) {
1400		brcmf_err("seq %d: sequence number error, expect %d\n",
1401			  rx_seq, rd->seq_num);
1402		bus->sdcnt.rx_badseq++;
1403		rd->seq_num = rx_seq;
1404	}
1405	/* no need to check the rest of the header for a subframe */
1406	if (type == BRCMF_SDIO_FT_SUB)
1407		return 0;
1408	rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
1409	if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1410		/* only warn for non-glom packets */
1411		if (rd->channel != SDPCM_GLOM_CHANNEL)
1412			brcmf_err("seq %d: next length error\n", rx_seq);
1413		rd->len_nxtfrm = 0;
1414	}
1415	swheader = le32_to_cpu(*(__le32 *)(header + 4));
1416	fc = swheader & SDPCM_FCMASK_MASK;
1417	if (bus->flowcontrol != fc) {
1418		if (~bus->flowcontrol & fc)
1419			bus->sdcnt.fc_xoff++;
1420		if (bus->flowcontrol & ~fc)
1421			bus->sdcnt.fc_xon++;
1422		bus->sdcnt.fc_rcvd++;
1423		bus->flowcontrol = fc;
1424	}
1425	tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
1426	if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1427		brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1428		tx_seq_max = bus->tx_seq + 2;
1429	}
1430	bus->tx_max = tx_seq_max;
1431
1432	return 0;
1433}
1434
1435static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
1436{
1437	*(__le16 *)header = cpu_to_le16(frm_length);
1438	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
1439}
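
/*
 * Editor's note: the frame tag written above is the 16-bit frame length
 * followed by its bitwise inverse, which is exactly what the receive
 * path verifies with (u16)(~(len ^ checksum)) in brcmf_sdio_hdparse().
 * A 0x0208-byte frame, for example, is tagged 08 02 f7 fd on the wire
 * (both halves little endian).
 */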
1440
1441static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
1442			      struct brcmf_sdio_hdrinfo *hd_info)
1443{
1444	u32 hdrval;
1445	u8 hdr_offset;
1446
1447	brcmf_sdio_update_hwhdr(header, hd_info->len);
1448	hdr_offset = SDPCM_HWHDR_LEN;
1449
1450	if (bus->txglom) {
1451		hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
1452		*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1453		hdrval = (u16)hd_info->tail_pad << 16;
1454		*(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
1455		hdr_offset += SDPCM_HWEXT_LEN;
1456	}
1457
1458	hdrval = hd_info->seq_num;
1459	hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
1460		  SDPCM_CHANNEL_MASK;
1461	hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
1462		  SDPCM_DOFFSET_MASK;
1463	*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1464	*(((__le32 *)(header + hdr_offset)) + 1) = 0;
1465	trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
1466}
1467
1468static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1469{
1470	u16 dlen, totlen;
1471	u8 *dptr, num = 0;
1472	u16 sublen;
1473	struct sk_buff *pfirst, *pnext;
1474
1475	int errcode;
1476	u8 doff, sfdoff;
1477
1478	struct brcmf_sdio_hdrinfo rd_new;
1479
1480	/* If packets, issue read(s) and send up packet chain */
1481	/* Return sequence numbers consumed? */
1482
1483	brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1484		  bus->glomd, skb_peek(&bus->glom));
1485
1486	/* If there's a descriptor, generate the packet chain */
1487	if (bus->glomd) {
1488		pfirst = pnext = NULL;
1489		dlen = (u16) (bus->glomd->len);
1490		dptr = bus->glomd->data;
1491		if (!dlen || (dlen & 1)) {
1492			brcmf_err("bad glomd len(%d), ignore descriptor\n",
1493				  dlen);
1494			dlen = 0;
1495		}
1496
1497		for (totlen = num = 0; dlen; num++) {
1498			/* Get (and move past) next length */
1499			sublen = get_unaligned_le16(dptr);
1500			dlen -= sizeof(u16);
1501			dptr += sizeof(u16);
1502			if ((sublen < SDPCM_HDRLEN) ||
1503			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1504				brcmf_err("descriptor len %d bad: %d\n",
1505					  num, sublen);
1506				pnext = NULL;
1507				break;
1508			}
1509			if (sublen % bus->sgentry_align) {
1510				brcmf_err("sublen %d not multiple of %d\n",
1511					  sublen, bus->sgentry_align);
1512			}
1513			totlen += sublen;
1514
1515			/* For last frame, adjust read len so total
1516				 is a block multiple */
1517			if (!dlen) {
1518				sublen +=
1519				    (roundup(totlen, bus->blocksize) - totlen);
1520				totlen = roundup(totlen, bus->blocksize);
1521			}
1522
1523			/* Allocate/chain packet for next subframe */
1524			pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
1525			if (pnext == NULL) {
1526				brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1527					  num, sublen);
1528				break;
1529			}
1530			skb_queue_tail(&bus->glom, pnext);
1531
1532			/* Adhere to start alignment requirements */
1533			pkt_align(pnext, sublen, bus->sgentry_align);
1534		}
1535
1536		/* If all allocations succeeded, save packet chain
1537			 in bus structure */
1538		if (pnext) {
1539			brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1540				  totlen, num);
1541			if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1542			    totlen != bus->cur_read.len) {
1543				brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1544					  bus->cur_read.len, totlen, rxseq);
1545			}
1546			pfirst = pnext = NULL;
1547		} else {
1548			brcmf_sdio_free_glom(bus);
1549			num = 0;
1550		}
1551
1552		/* Done with descriptor packet */
1553		brcmu_pkt_buf_free_skb(bus->glomd);
1554		bus->glomd = NULL;
1555		bus->cur_read.len = 0;
1556	}
1557
1558	/* Ok -- either we just generated a packet chain,
1559		 or had one from before */
1560	if (!skb_queue_empty(&bus->glom)) {
1561		if (BRCMF_GLOM_ON()) {
1562			brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1563			skb_queue_walk(&bus->glom, pnext) {
1564				brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
1565					  pnext, (u8 *) (pnext->data),
1566					  pnext->len, pnext->len);
1567			}
1568		}
1569
1570		pfirst = skb_peek(&bus->glom);
1571		dlen = (u16) brcmf_sdio_glom_len(bus);
1572
1573		/* Do an SDIO read for the superframe.  Configurable iovar to
1574		 * read directly into the chained packet, or allocate a large
1575		 * packet and copy into the chain.
1576		 */
1577		sdio_claim_host(bus->sdiodev->func[1]);
1578		errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
1579						 &bus->glom, dlen);
1580		sdio_release_host(bus->sdiodev->func[1]);
1581		bus->sdcnt.f2rxdata++;
1582
1583		/* On failure, kill the superframe, allow a couple retries */
1584		if (errcode < 0) {
1585			brcmf_err("glom read of %d bytes failed: %d\n",
1586				  dlen, errcode);
1587
1588			sdio_claim_host(bus->sdiodev->func[1]);
1589			if (bus->glomerr++ < 3) {
1590				brcmf_sdio_rxfail(bus, true, true);
1591			} else {
1592				bus->glomerr = 0;
1593				brcmf_sdio_rxfail(bus, true, false);
1594				bus->sdcnt.rxglomfail++;
1595				brcmf_sdio_free_glom(bus);
1596			}
1597			sdio_release_host(bus->sdiodev->func[1]);
1598			return 0;
1599		}
1600
1601		brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1602				   pfirst->data, min_t(int, pfirst->len, 48),
1603				   "SUPERFRAME:\n");
1604
1605		rd_new.seq_num = rxseq;
1606		rd_new.len = dlen;
1607		sdio_claim_host(bus->sdiodev->func[1]);
1608		errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
1609					     BRCMF_SDIO_FT_SUPER);
1610		sdio_release_host(bus->sdiodev->func[1]);
1611		bus->cur_read.len = rd_new.len_nxtfrm << 4;
1612
1613		/* Remove superframe header, remember offset */
1614		skb_pull(pfirst, rd_new.dat_offset);
1615		sfdoff = rd_new.dat_offset;
1616		num = 0;
1617
1618		/* Validate all the subframe headers */
1619		skb_queue_walk(&bus->glom, pnext) {
1620			/* leave when invalid subframe is found */
1621			if (errcode)
1622				break;
1623
1624			rd_new.len = pnext->len;
1625			rd_new.seq_num = rxseq++;
1626			sdio_claim_host(bus->sdiodev->func[1]);
1627			errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
1628						     BRCMF_SDIO_FT_SUB);
1629			sdio_release_host(bus->sdiodev->func[1]);
1630			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1631					   pnext->data, 32, "subframe:\n");
1632
1633			num++;
1634		}
1635
1636		if (errcode) {
1637			/* Terminate frame on error, request
1638				 a couple retries */
1639			sdio_claim_host(bus->sdiodev->func[1]);
1640			if (bus->glomerr++ < 3) {
1641				/* Restore superframe header space */
1642				skb_push(pfirst, sfdoff);
1643				brcmf_sdio_rxfail(bus, true, true);
1644			} else {
1645				bus->glomerr = 0;
1646				brcmf_sdio_rxfail(bus, true, false);
1647				bus->sdcnt.rxglomfail++;
1648				brcmf_sdio_free_glom(bus);
1649			}
1650			sdio_release_host(bus->sdiodev->func[1]);
1651			bus->cur_read.len = 0;
1652			return 0;
1653		}
1654
1655		/* Basic SD framing looks ok - process each packet (header) */
1656
1657		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1658			dptr = (u8 *) (pfirst->data);
1659			sublen = get_unaligned_le16(dptr);
1660			doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
1661
1662			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1663					   dptr, pfirst->len,
1664					   "Rx Subframe Data:\n");
1665
1666			__skb_trim(pfirst, sublen);
1667			skb_pull(pfirst, doff);
1668
1669			if (pfirst->len == 0) {
1670				skb_unlink(pfirst, &bus->glom);
1671				brcmu_pkt_buf_free_skb(pfirst);
1672				continue;
1673			}
1674
1675			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1676					   pfirst->data,
1677					   min_t(int, pfirst->len, 32),
1678					   "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1679					   bus->glom.qlen, pfirst, pfirst->data,
1680					   pfirst->len, pfirst->next,
1681					   pfirst->prev);
1682			skb_unlink(pfirst, &bus->glom);
1683			brcmf_rx_frame(bus->sdiodev->dev, pfirst);
1684			bus->sdcnt.rxglompkts++;
1685		}
1686
1687		bus->sdcnt.rxglomframes++;
1688	}
1689	return num;
1690}
1691
1692static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1693				     bool *pending)
1694{
1695	DECLARE_WAITQUEUE(wait, current);
1696	int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
1697
1698	/* Wait until control frame is available */
1699	add_wait_queue(&bus->dcmd_resp_wait, &wait);
1700	set_current_state(TASK_INTERRUPTIBLE);
1701
1702	while (!(*condition) && (!signal_pending(current) && timeout))
1703		timeout = schedule_timeout(timeout);
1704
1705	if (signal_pending(current))
1706		*pending = true;
1707
1708	set_current_state(TASK_RUNNING);
1709	remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1710
1711	return timeout;
1712}
1713
1714static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
1715{
1716	if (waitqueue_active(&bus->dcmd_resp_wait))
1717		wake_up_interruptible(&bus->dcmd_resp_wait);
1718
1719	return 0;
1720}
1721static void
1722brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1723{
1724	uint rdlen, pad;
1725	u8 *buf = NULL, *rbuf;
1726	int sdret;
1727
1728	brcmf_dbg(TRACE, "Enter\n");
1729
1730	if (bus->rxblen)
1731		buf = vzalloc(bus->rxblen);
1732	if (!buf)
1733		goto done;
1734
1735	rbuf = bus->rxbuf;
1736	pad = ((unsigned long)rbuf % bus->head_align);
1737	if (pad)
1738		rbuf += (bus->head_align - pad);
1739
1740	/* Copy the already-read portion over */
1741	memcpy(buf, hdr, BRCMF_FIRSTREAD);
1742	if (len <= BRCMF_FIRSTREAD)
1743		goto gotpkt;
1744
1745	/* Raise rdlen to next SDIO block to avoid tail command */
1746	rdlen = len - BRCMF_FIRSTREAD;
1747	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1748		pad = bus->blocksize - (rdlen % bus->blocksize);
1749		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1750		    ((len + pad) < bus->sdiodev->bus_if->maxctl))
1751			rdlen += pad;
1752	} else if (rdlen % bus->head_align) {
1753		rdlen += bus->head_align - (rdlen % bus->head_align);
1754	}
1755
1756	/* Drop if the read is too big or it exceeds our maximum */
1757	if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1758		brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1759			  rdlen, bus->sdiodev->bus_if->maxctl);
1760		brcmf_sdio_rxfail(bus, false, false);
1761		goto done;
1762	}
1763
1764	if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1765		brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1766			  len, len - doff, bus->sdiodev->bus_if->maxctl);
1767		bus->sdcnt.rx_toolong++;
1768		brcmf_sdio_rxfail(bus, false, false);
1769		goto done;
1770	}
1771
1772	/* Read the remainder of the frame body */
1773	sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
1774	bus->sdcnt.f2rxdata++;
1775
1776	/* Control frame failures need retransmission */
1777	if (sdret < 0) {
1778		brcmf_err("read %d control bytes failed: %d\n",
1779			  rdlen, sdret);
1780		bus->sdcnt.rxc_errors++;
1781		brcmf_sdio_rxfail(bus, true, true);
1782		goto done;
1783	} else
1784		memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1785
1786gotpkt:
1787
1788	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1789			   buf, len, "RxCtrl:\n");
1790
1791	/* Point to valid data and indicate its length */
1792	spin_lock_bh(&bus->rxctl_lock);
1793	if (bus->rxctl) {
1794		brcmf_err("last control frame is being processed.\n");
1795		spin_unlock_bh(&bus->rxctl_lock);
1796		vfree(buf);
1797		goto done;
1798	}
1799	bus->rxctl = buf + doff;
1800	bus->rxctl_orig = buf;
1801	bus->rxlen = len - doff;
1802	spin_unlock_bh(&bus->rxctl_lock);
1803
1804done:
1805	/* Awake any waiters */
1806	brcmf_sdio_dcmd_resp_wake(bus);
1807}
1808
1809/* Pad read to blocksize for efficiency */
1810static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1811{
1812	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1813		*pad = bus->blocksize - (*rdlen % bus->blocksize);
1814		if (*pad <= bus->roundup && *pad < bus->blocksize &&
1815		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1816			*rdlen += *pad;
1817	} else if (*rdlen % bus->head_align) {
1818		*rdlen += bus->head_align - (*rdlen % bus->head_align);
1819	}
1820}
1821
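/* Read up to maxframes frames from the device. Control frames are handled by
 * brcmf_sdio_read_control(), glom superframes by brcmf_sdio_rxglom(), and all
 * remaining frames are passed to the common layer with brcmf_rx_frame().
 * Returns the number of frames processed.
 */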
1822static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1823{
1824	struct sk_buff *pkt;		/* Packet for event or data frames */
1825	u16 pad;		/* Number of pad bytes to read */
1826	uint rxleft = 0;	/* Remaining number of frames allowed */
1827	int ret;		/* Return code from calls */
1828	uint rxcount = 0;	/* Total frames read */
1829	struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
1830	u8 head_read = 0;
1831
1832	brcmf_dbg(TRACE, "Enter\n");
1833
1834	/* Not finished unless we encounter no more frames indication */
1835	bus->rxpending = true;
1836
1837	for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1838	     !bus->rxskip && rxleft && brcmf_bus_ready(bus->sdiodev->bus_if);
1839	     rd->seq_num++, rxleft--) {
1840
1841		/* Handle glomming separately */
1842		if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1843			u8 cnt;
1844			brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1845				  bus->glomd, skb_peek(&bus->glom));
1846			cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
1847			brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1848			rd->seq_num += cnt - 1;
1849			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1850			continue;
1851		}
1852
1853		rd->len_left = rd->len;
1854		/* read header first for unknown frame length */
1855		sdio_claim_host(bus->sdiodev->func[1]);
1856		if (!rd->len) {
1857			ret = brcmf_sdiod_recv_buf(bus->sdiodev,
1858						   bus->rxhdr, BRCMF_FIRSTREAD);
1859			bus->sdcnt.f2rxhdrs++;
1860			if (ret < 0) {
1861				brcmf_err("RXHEADER FAILED: %d\n",
1862					  ret);
1863				bus->sdcnt.rx_hdrfail++;
1864				brcmf_sdio_rxfail(bus, true, true);
1865				sdio_release_host(bus->sdiodev->func[1]);
1866				continue;
1867			}
1868
1869			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1870					   bus->rxhdr, SDPCM_HDRLEN,
1871					   "RxHdr:\n");
1872
1873			if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
1874					       BRCMF_SDIO_FT_NORMAL)) {
1875				sdio_release_host(bus->sdiodev->func[1]);
1876				if (!bus->rxpending)
1877					break;
1878				else
1879					continue;
1880			}
1881
1882			if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1883				brcmf_sdio_read_control(bus, bus->rxhdr,
1884							rd->len,
1885							rd->dat_offset);
1886				/* prepare the descriptor for the next read */
1887				rd->len = rd->len_nxtfrm << 4;
1888				rd->len_nxtfrm = 0;
1889				/* treat all packets as events if we don't know */
1890				rd->channel = SDPCM_EVENT_CHANNEL;
1891				sdio_release_host(bus->sdiodev->func[1]);
1892				continue;
1893			}
1894			rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1895				       rd->len - BRCMF_FIRSTREAD : 0;
1896			head_read = BRCMF_FIRSTREAD;
1897		}
1898
1899		brcmf_sdio_pad(bus, &pad, &rd->len_left);
1900
1901		pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1902					    bus->head_align);
1903		if (!pkt) {
1904			/* Give up on data, request rtx of events */
1905			brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1906			brcmf_sdio_rxfail(bus, false,
1907					    RETRYCHAN(rd->channel));
1908			sdio_release_host(bus->sdiodev->func[1]);
1909			continue;
1910		}
1911		skb_pull(pkt, head_read);
1912		pkt_align(pkt, rd->len_left, bus->head_align);
1913
1914		ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
1915		bus->sdcnt.f2rxdata++;
1916		sdio_release_host(bus->sdiodev->func[1]);
1917
1918		if (ret < 0) {
1919			brcmf_err("read %d bytes from channel %d failed: %d\n",
1920				  rd->len, rd->channel, ret);
1921			brcmu_pkt_buf_free_skb(pkt);
1922			sdio_claim_host(bus->sdiodev->func[1]);
1923			brcmf_sdio_rxfail(bus, true,
1924					    RETRYCHAN(rd->channel));
1925			sdio_release_host(bus->sdiodev->func[1]);
1926			continue;
1927		}
1928
1929		if (head_read) {
1930			skb_push(pkt, head_read);
1931			memcpy(pkt->data, bus->rxhdr, head_read);
1932			head_read = 0;
1933		} else {
1934			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1935			rd_new.seq_num = rd->seq_num;
1936			sdio_claim_host(bus->sdiodev->func[1]);
1937			if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
1938					       BRCMF_SDIO_FT_NORMAL)) {
1939				rd->len = 0;
1940				brcmu_pkt_buf_free_skb(pkt);
1941			}
1942			bus->sdcnt.rx_readahead_cnt++;
1943			if (rd->len != roundup(rd_new.len, 16)) {
1944				brcmf_err("frame length mismatch: read %d, should be %d\n",
1945					  rd->len,
1946					  roundup(rd_new.len, 16));
1947				rd->len = 0;
1948				brcmf_sdio_rxfail(bus, true, true);
1949				sdio_release_host(bus->sdiodev->func[1]);
1950				brcmu_pkt_buf_free_skb(pkt);
1951				continue;
1952			}
1953			sdio_release_host(bus->sdiodev->func[1]);
1954			rd->len_nxtfrm = rd_new.len_nxtfrm;
1955			rd->channel = rd_new.channel;
1956			rd->dat_offset = rd_new.dat_offset;
1957
1958			brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1959					     BRCMF_DATA_ON()) &&
1960					   BRCMF_HDRS_ON(),
1961					   bus->rxhdr, SDPCM_HDRLEN,
1962					   "RxHdr:\n");
1963
1964			if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1965				brcmf_err("readahead on control packet %d?\n",
1966					  rd_new.seq_num);
1967				/* Force retry w/normal header read */
1968				rd->len = 0;
1969				sdio_claim_host(bus->sdiodev->func[1]);
1970				brcmf_sdio_rxfail(bus, false, true);
1971				sdio_release_host(bus->sdiodev->func[1]);
1972				brcmu_pkt_buf_free_skb(pkt);
1973				continue;
1974			}
1975		}
1976
1977		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1978				   pkt->data, rd->len, "Rx Data:\n");
1979
1980		/* Save superframe descriptor and allocate packet frame */
1981		if (rd->channel == SDPCM_GLOM_CHANNEL) {
1982			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
1983				brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1984					  rd->len);
1985				brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1986						   pkt->data, rd->len,
1987						   "Glom Data:\n");
1988				__skb_trim(pkt, rd->len);
1989				skb_pull(pkt, SDPCM_HDRLEN);
1990				bus->glomd = pkt;
1991			} else {
1992				brcmf_err("%s: glom superframe w/o descriptor!\n",
1993					  __func__);
1994				sdio_claim_host(bus->sdiodev->func[1]);
1995				brcmf_sdio_rxfail(bus, false, false);
1996				sdio_release_host(bus->sdiodev->func[1]);
1997			}
1998			/* prepare the descriptor for the next read */
1999			rd->len = rd->len_nxtfrm << 4;
2000			rd->len_nxtfrm = 0;
2001			/* treat all packets as events if we don't know */
2002			rd->channel = SDPCM_EVENT_CHANNEL;
2003			continue;
2004		}
2005
2006		/* Fill in packet len and prio, deliver upward */
2007		__skb_trim(pkt, rd->len);
2008		skb_pull(pkt, rd->dat_offset);
2009
2010		/* prepare the descriptor for the next read */
2011		rd->len = rd->len_nxtfrm << 4;
2012		rd->len_nxtfrm = 0;
2013		/* treat all packets as events if we don't know */
2014		rd->channel = SDPCM_EVENT_CHANNEL;
2015
2016		if (pkt->len == 0) {
2017			brcmu_pkt_buf_free_skb(pkt);
2018			continue;
2019		}
2020
2021		brcmf_rx_frame(bus->sdiodev->dev, pkt);
2022	}
2023
2024	rxcount = maxframes - rxleft;
2025	/* Message if we hit the limit */
2026	if (!rxleft)
2027		brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
2028	else
2029		brcmf_dbg(DATA, "processed %d frames\n", rxcount);
2030	/* Back off rxseq if awaiting rtx, update rx_seq */
2031	if (bus->rxskip)
2032		rd->seq_num--;
2033	bus->rx_seq = rd->seq_num;
2034
2035	return rxcount;
2036}
2037
2038static void
2039brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
2040{
2041	if (waitqueue_active(&bus->ctrl_wait))
2042		wake_up_interruptible(&bus->ctrl_wait);
2043	return;
2044}
2045
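/* Align the packet data pointer to bus->head_align by pushing pad bytes in
 * front of the data, reallocating headroom if necessary. Returns the number
 * of head pad bytes added or a negative errno.
 */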
2046static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
2047{
2048	u16 head_pad;
2049	u8 *dat_buf;
2050
2051	dat_buf = (u8 *)(pkt->data);
2052
2053	/* Check head padding */
2054	head_pad = ((unsigned long)dat_buf % bus->head_align);
2055	if (head_pad) {
2056		if (skb_headroom(pkt) < head_pad) {
2057			bus->sdiodev->bus_if->tx_realloc++;
2058			head_pad = 0;
2059			if (skb_cow(pkt, head_pad))
2060				return -ENOMEM;
2061		}
2062		skb_push(pkt, head_pad);
2063		dat_buf = (u8 *)(pkt->data);
2064		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
2065	}
2066	return head_pad;
2067}
2068
2069/**
2070 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
2071 * bus layer usage.
2072 */
2073/* flag marking a dummy skb added for DMA alignment requirement */
2074#define ALIGN_SKB_FLAG		0x8000
2075/* bit mask of data length chopped from the previous packet */
2076#define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
2077
2078static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
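/* Add tail padding to a packet of a glom chain so every scatter-gather entry
 * is sgentry_align aligned; for the last frame the chain is also rounded up
 * to the SDIO block size. Returns the number of tail pad bytes added or a
 * negative errno.
 */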
2079				    struct sk_buff_head *pktq,
2080				    struct sk_buff *pkt, u16 total_len)
2081{
2082	struct brcmf_sdio_dev *sdiodev;
2083	struct sk_buff *pkt_pad;
2084	u16 tail_pad, tail_chop, chain_pad;
2085	unsigned int blksize;
2086	bool lastfrm;
2087	int ntail, ret;
2088
2089	sdiodev = bus->sdiodev;
2090	blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
2091	/* sg entry alignment should be a divisor of block size */
2092	WARN_ON(blksize % bus->sgentry_align);
2093
2094	/* Check tail padding */
2095	lastfrm = skb_queue_is_last(pktq, pkt);
2096	tail_pad = 0;
2097	tail_chop = pkt->len % bus->sgentry_align;
2098	if (tail_chop)
2099		tail_pad = bus->sgentry_align - tail_chop;
2100	chain_pad = (total_len + tail_pad) % blksize;
2101	if (lastfrm && chain_pad)
2102		tail_pad += blksize - chain_pad;
2103	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
2104		pkt_pad = bus->txglom_sgpad;
2105		if (pkt_pad == NULL)
2106			pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
2107		if (pkt_pad == NULL)
2108			return -ENOMEM;
2109		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
2110		if (unlikely(ret < 0))
2111			return ret;
2112		memcpy(pkt_pad->data,
2113		       pkt->data + pkt->len - tail_chop,
2114		       tail_chop);
2115		*(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
2116		skb_trim(pkt, pkt->len - tail_chop);
2117		__skb_queue_after(pktq, pkt, pkt_pad);
2118	} else {
2119		ntail = pkt->data_len + tail_pad -
2120			(pkt->end - pkt->tail);
2121		if (skb_cloned(pkt) || ntail > 0)
2122			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
2123				return -ENOMEM;
2124		if (skb_linearize(pkt))
2125			return -ENOMEM;
2126		__skb_put(pkt, tail_pad);
2127	}
2128
2129	return tail_pad;
2130}
2131
2132/**
2133 * brcmf_sdio_txpkt_prep - packet preparation for transmit
2134 * @bus: brcmf_sdio structure pointer
2135 * @pktq: packet list pointer
2136 * @chan: virtual channel to transmit the packet
2137 *
2138 * Processes to be applied to the packet
2139 *	- Align data buffer pointer
2140 *	- Align data buffer length
2141 *	- Prepare header
2142 * Return: negative value if there is error
2143 */
2144static int
2145brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2146		      uint chan)
2147{
2148	u16 head_pad, total_len;
2149	struct sk_buff *pkt_next;
2150	u8 txseq;
2151	int ret;
2152	struct brcmf_sdio_hdrinfo hd_info = {0};
2153
2154	txseq = bus->tx_seq;
2155	total_len = 0;
2156	skb_queue_walk(pktq, pkt_next) {
2157		/* alignment packet inserted in previous
2158		 * loop cycle can be skipped as it is
2159		 * already properly aligned and does not
2160		 * need an sdpcm header.
2161		 */
2162		if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
2163			continue;
2164
2165		/* align packet data pointer */
2166		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
2167		if (ret < 0)
2168			return ret;
2169		head_pad = (u16)ret;
2170		if (head_pad)
2171			memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen);
2172
2173		total_len += pkt_next->len;
2174
2175		hd_info.len = pkt_next->len;
2176		hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
2177		if (bus->txglom && pktq->qlen > 1) {
2178			ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
2179						       pkt_next, total_len);
2180			if (ret < 0)
2181				return ret;
2182			hd_info.tail_pad = (u16)ret;
2183			total_len += (u16)ret;
2184		}
2185
2186		hd_info.channel = chan;
2187		hd_info.dat_offset = head_pad + bus->tx_hdrlen;
2188		hd_info.seq_num = txseq++;
2189
2190		/* Now fill the header */
2191		brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
2192
2193		if (BRCMF_BYTES_ON() &&
2194		    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
2195		     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
2196			brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
2197					   "Tx Frame:\n");
2198		else if (BRCMF_HDRS_ON())
2199			brcmf_dbg_hex_dump(true, pkt_next->data,
2200					   head_pad + bus->tx_hdrlen,
2201					   "Tx Header:\n");
2202	}
2203	/* Hardware length tag of the first packet should be total
2204	 * length of the chain (including padding)
2205	 */
2206	if (bus->txglom)
2207		brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
2208	return 0;
2209}
2210
2211/**
2212 * brcmf_sdio_txpkt_postp - packet post processing for transmit
2213 * @bus: brcmf_sdio structure pointer
2214 * @pktq: packet list pointer
2215 *
2216 * Processes to be applied to the packet
2217 *	- Remove head padding
2218 *	- Remove tail padding
2219 */
2220static void
2221brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
2222{
2223	u8 *hdr;
2224	u32 dat_offset;
2225	u16 tail_pad;
2226	u16 dummy_flags, chop_len;
2227	struct sk_buff *pkt_next, *tmp, *pkt_prev;
2228
2229	skb_queue_walk_safe(pktq, pkt_next, tmp) {
2230		dummy_flags = *(u16 *)(pkt_next->cb);
2231		if (dummy_flags & ALIGN_SKB_FLAG) {
2232			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
2233			if (chop_len) {
2234				pkt_prev = pkt_next->prev;
2235				skb_put(pkt_prev, chop_len);
2236			}
2237			__skb_unlink(pkt_next, pktq);
2238			brcmu_pkt_buf_free_skb(pkt_next);
2239		} else {
2240			hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
2241			dat_offset = le32_to_cpu(*(__le32 *)hdr);
2242			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
2243				     SDPCM_DOFFSET_SHIFT;
2244			skb_pull(pkt_next, dat_offset);
2245			if (bus->txglom) {
2246				tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
2247				skb_trim(pkt_next, pkt_next->len - tail_pad);
2248			}
2249		}
2250	}
2251}
2252
2253/* Writes a HW/SW header into the packet and sends it. */
2254/* Assumes: (a) header space already there, (b) caller holds lock */
2255static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2256			    uint chan)
2257{
2258	int ret;
2259	int i;
2260	struct sk_buff *pkt_next, *tmp;
2261
2262	brcmf_dbg(TRACE, "Enter\n");
2263
2264	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
2265	if (ret)
2266		goto done;
2267
2268	sdio_claim_host(bus->sdiodev->func[1]);
2269	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
2270	bus->sdcnt.f2txdata++;
2271
2272	if (ret < 0) {
2273		/* On failure, abort the command and terminate the frame */
2274		brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2275			  ret);
2276		bus->sdcnt.tx_sderrs++;
2277
2278		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
2279		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2280				  SFC_WF_TERM, NULL);
2281		bus->sdcnt.f1regdata++;
2282
2283		for (i = 0; i < 3; i++) {
2284			u8 hi, lo;
2285			hi = brcmf_sdiod_regrb(bus->sdiodev,
2286					       SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2287			lo = brcmf_sdiod_regrb(bus->sdiodev,
2288					       SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2289			bus->sdcnt.f1regdata += 2;
2290			if ((hi == 0) && (lo == 0))
2291				break;
2292		}
2293	}
2294	sdio_release_host(bus->sdiodev->func[1]);
2295
2296done:
2297	brcmf_sdio_txpkt_postp(bus, pktq);
2298	if (ret == 0)
2299		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
2300	skb_queue_walk_safe(pktq, pkt_next, tmp) {
2301		__skb_unlink(pkt_next, pktq);
2302		brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
2303	}
2304	return ret;
2305}
2306
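/* Dequeue up to maxframes packets from the precedence queue and transmit
 * them, glommed when bus->txglom is enabled. Releases flow control once the
 * queue drains below TXLOW. Returns the number of frames sent.
 */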
2307static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2308{
2309	struct sk_buff *pkt;
2310	struct sk_buff_head pktq;
2311	u32 intstatus = 0;
2312	int ret = 0, prec_out, i;
2313	uint cnt = 0;
2314	u8 tx_prec_map, pkt_num;
2315
2316	brcmf_dbg(TRACE, "Enter\n");
2317
2318	tx_prec_map = ~bus->flowcontrol;
2319
2320	/* Send frames until the limit or some other event */
2321	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
2322		pkt_num = 1;
2323		__skb_queue_head_init(&pktq);
2324		if (bus->txglom)
2325			pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
2326					brcmf_sdio_txglomsz);
2327		pkt_num = min_t(u32, pkt_num,
2328				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
2329		spin_lock_bh(&bus->txqlock);
2330		for (i = 0; i < pkt_num; i++) {
2331			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
2332					      &prec_out);
2333			if (pkt == NULL)
2334				break;
2335			__skb_queue_tail(&pktq, pkt);
2336		}
2337		spin_unlock_bh(&bus->txqlock);
2338		if (i == 0)
2339			break;
2340
2341		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
2342		cnt += i;
2343
2344		/* In poll mode, need to check for other events */
2345		if (!bus->intr && cnt) {
2346			/* Check device status, signal pending interrupt */
2347			sdio_claim_host(bus->sdiodev->func[1]);
2348			ret = r_sdreg32(bus, &intstatus,
2349					offsetof(struct sdpcmd_regs,
2350						 intstatus));
2351			sdio_release_host(bus->sdiodev->func[1]);
2352			bus->sdcnt.f2txdata++;
2353			if (ret != 0)
2354				break;
2355			if (intstatus & bus->hostintmask)
2356				atomic_set(&bus->ipend, 1);
2357		}
2358	}
2359
2360	/* Deflow-control stack if needed */
2361	if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
2362	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2363		bus->txoff = false;
2364		brcmf_txflowblock(bus->sdiodev->dev, false);
2365	}
2366
2367	return cnt;
2368}
2369
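/* Bus stop callback: terminate the watchdog thread, mask and clear device
 * interrupts, disable the F2 function and flush queued tx packets, glom
 * state and any pending rx control buffer.
 */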
2370static void brcmf_sdio_bus_stop(struct device *dev)
2371{
2372	u32 local_hostintmask;
2373	u8 saveclk;
2374	int err;
2375	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2376	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2377	struct brcmf_sdio *bus = sdiodev->bus;
2378
2379	brcmf_dbg(TRACE, "Enter\n");
2380
2381	if (bus->watchdog_tsk) {
2382		send_sig(SIGTERM, bus->watchdog_tsk, 1);
2383		kthread_stop(bus->watchdog_tsk);
2384		bus->watchdog_tsk = NULL;
2385	}
2386
2387	if (bus_if->state == BRCMF_BUS_DOWN) {
2388		sdio_claim_host(sdiodev->func[1]);
2389
2390		/* Enable clock for device interrupts */
2391		brcmf_sdio_bus_sleep(bus, false, false);
2392
2393		/* Disable and clear interrupts at the chip level also */
2394		w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
2395		local_hostintmask = bus->hostintmask;
2396		bus->hostintmask = 0;
2397
2398		/* Force backplane clocks to assure F2 interrupt propagates */
2399		saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2400					    &err);
2401		if (!err)
2402			brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2403					  (saveclk | SBSDIO_FORCE_HT), &err);
2404		if (err)
2405			brcmf_err("Failed to force clock for F2: err %d\n",
2406				  err);
2407
2408		/* Turn off the bus (F2), free any pending packets */
2409		brcmf_dbg(INTR, "disable SDIO interrupts\n");
2410		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
2411
2412		/* Clear any pending interrupts now that F2 is disabled */
2413		w_sdreg32(bus, local_hostintmask,
2414			  offsetof(struct sdpcmd_regs, intstatus));
2415
2416		sdio_release_host(sdiodev->func[1]);
2417	}
2418	/* Clear the data packet queues */
2419	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2420
2421	/* Clear any held glomming stuff */
2422	if (bus->glomd)
2423		brcmu_pkt_buf_free_skb(bus->glomd);
2424	brcmf_sdio_free_glom(bus);
2425
2426	/* Clear rx control and wake any waiters */
2427	spin_lock_bh(&bus->rxctl_lock);
2428	bus->rxlen = 0;
2429	spin_unlock_bh(&bus->rxctl_lock);
2430	brcmf_sdio_dcmd_resp_wake(bus);
2431
2432	/* Reset some F2 state stuff */
2433	bus->rxskip = false;
2434	bus->tx_seq = bus->rx_seq = 0;
2435}
2436
2437static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
2438{
2439	unsigned long flags;
2440
2441	if (bus->sdiodev->oob_irq_requested) {
2442		spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2443		if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2444			enable_irq(bus->sdiodev->pdata->oob_irq_nr);
2445			bus->sdiodev->irq_en = true;
2446		}
2447		spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2448	}
2449}
2450
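/* Read and acknowledge the SDIO core interrupt status, update the
 * flow-control state and fold the pending bits into bus->intstatus for the
 * DPC to process.
 */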
2451static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2452{
2453	struct brcmf_core *buscore;
2454	u32 addr;
2455	unsigned long val;
2456	int n, ret;
2457
2458	buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
2459	addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
2460
2461	val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
2462	bus->sdcnt.f1regdata++;
2463	if (ret != 0)
2464		val = 0;
2465
2466	val &= bus->hostintmask;
2467	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2468
2469	/* Clear interrupts */
2470	if (val) {
2471		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
2472		bus->sdcnt.f1regdata++;
2473	}
2474
2475	if (ret) {
2476		atomic_set(&bus->intstatus, 0);
2477	} else if (val) {
2478		for_each_set_bit(n, &val, 32)
2479			set_bit(n, (unsigned long *)&bus->intstatus.counter);
2480	}
2481
2482	return ret;
2483}
2484
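/* Deferred procedure call, run from the bus workqueue: service pending
 * interrupts, read available rx frames, send queued tx frames and any
 * pending control frame, then drop the clock request when idle.
 */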
2485static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2486{
2487	u32 newstatus = 0;
2488	unsigned long intstatus;
2489	uint rxlimit = bus->rxbound;	/* Rx frames to read before resched */
2490	uint txlimit = bus->txbound;	/* Tx frames to send before resched */
2491	uint framecnt = 0;	/* Temporary counter of tx/rx frames */
2492	int err = 0, n;
2493
2494	brcmf_dbg(TRACE, "Enter\n");
2495
2496	sdio_claim_host(bus->sdiodev->func[1]);
2497
2498	/* If waiting for HTAVAIL, check status */
2499	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2500		u8 clkctl, devctl = 0;
2501
2502#ifdef DEBUG
2503		/* Check for inconsistent device control */
2504		devctl = brcmf_sdiod_regrb(bus->sdiodev,
2505					   SBSDIO_DEVICE_CTL, &err);
2506#endif				/* DEBUG */
2507
2508		/* Read CSR, if clock on switch to AVAIL, else ignore */
2509		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
2510					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
2511
2512		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2513			  devctl, clkctl);
2514
2515		if (SBSDIO_HTAV(clkctl)) {
2516			devctl = brcmf_sdiod_regrb(bus->sdiodev,
2517						   SBSDIO_DEVICE_CTL, &err);
2518			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2519			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2520					  devctl, &err);
2521			bus->clkstate = CLK_AVAIL;
2522		}
2523	}
2524
2525	/* Make sure backplane clock is on */
2526	brcmf_sdio_bus_sleep(bus, false, true);
2527
2528	/* Pending interrupt indicates new device status */
2529	if (atomic_read(&bus->ipend) > 0) {
2530		atomic_set(&bus->ipend, 0);
2531		err = brcmf_sdio_intr_rstatus(bus);
2532	}
2533
2534	/* Start with leftover status bits */
2535	intstatus = atomic_xchg(&bus->intstatus, 0);
2536
2537	/* Handle flow-control change: read new state in case our ack
2538	 * crossed another change interrupt.  If change still set, assume
2539	 * FC ON for safety, let next loop through do the debounce.
2540	 */
2541	if (intstatus & I_HMB_FC_CHANGE) {
2542		intstatus &= ~I_HMB_FC_CHANGE;
2543		err = w_sdreg32(bus, I_HMB_FC_CHANGE,
2544				offsetof(struct sdpcmd_regs, intstatus));
2545
2546		err = r_sdreg32(bus, &newstatus,
2547				offsetof(struct sdpcmd_regs, intstatus));
2548		bus->sdcnt.f1regdata += 2;
2549		atomic_set(&bus->fcstate,
2550			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2551		intstatus |= (newstatus & bus->hostintmask);
2552	}
2553
2554	/* Handle host mailbox indication */
2555	if (intstatus & I_HMB_HOST_INT) {
2556		intstatus &= ~I_HMB_HOST_INT;
2557		intstatus |= brcmf_sdio_hostmail(bus);
2558	}
2559
2560	sdio_release_host(bus->sdiodev->func[1]);
2561
2562	/* Generally don't ask for these, can get CRC errors... */
2563	if (intstatus & I_WR_OOSYNC) {
2564		brcmf_err("Dongle reports WR_OOSYNC\n");
2565		intstatus &= ~I_WR_OOSYNC;
2566	}
2567
2568	if (intstatus & I_RD_OOSYNC) {
2569		brcmf_err("Dongle reports RD_OOSYNC\n");
2570		intstatus &= ~I_RD_OOSYNC;
2571	}
2572
2573	if (intstatus & I_SBINT) {
2574		brcmf_err("Dongle reports SBINT\n");
2575		intstatus &= ~I_SBINT;
2576	}
2577
2578	/* Would be active due to wake-wlan in gSPI */
2579	if (intstatus & I_CHIPACTIVE) {
2580		brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
2581		intstatus &= ~I_CHIPACTIVE;
2582	}
2583
2584	/* Ignore frame indications if rxskip is set */
2585	if (bus->rxskip)
2586		intstatus &= ~I_HMB_FRAME_IND;
2587
2588	/* On frame indication, read available frames */
2589	if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
2590		framecnt = brcmf_sdio_readframes(bus, rxlimit);
2591		if (!bus->rxpending)
2592			intstatus &= ~I_HMB_FRAME_IND;
2593		rxlimit -= min(framecnt, rxlimit);
2594	}
2595
2596	/* Keep still-pending events for next scheduling */
2597	if (intstatus) {
2598		for_each_set_bit(n, &intstatus, 32)
2599			set_bit(n, (unsigned long *)&bus->intstatus.counter);
2600	}
2601
2602	brcmf_sdio_clrintr(bus);
2603
2604	if (data_ok(bus) && bus->ctrl_frame_stat &&
2605		(bus->clkstate == CLK_AVAIL)) {
2606		int i;
2607
2608		sdio_claim_host(bus->sdiodev->func[1]);
2609		err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
2610					   (u32)bus->ctrl_frame_len);
2611
2612		if (err < 0) {
2613			/* On failure, abort the command and
2614			 * terminate the frame */
2615			brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2616				  err);
2617			bus->sdcnt.tx_sderrs++;
2618
2619			brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
2620
2621			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2622					  SFC_WF_TERM, &err);
2623			bus->sdcnt.f1regdata++;
2624
2625			for (i = 0; i < 3; i++) {
2626				u8 hi, lo;
2627				hi = brcmf_sdiod_regrb(bus->sdiodev,
2628						       SBSDIO_FUNC1_WFRAMEBCHI,
2629						       &err);
2630				lo = brcmf_sdiod_regrb(bus->sdiodev,
2631						       SBSDIO_FUNC1_WFRAMEBCLO,
2632						       &err);
2633				bus->sdcnt.f1regdata += 2;
2634				if ((hi == 0) && (lo == 0))
2635					break;
2636			}
2637
2638		} else {
2639			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2640		}
2641		sdio_release_host(bus->sdiodev->func[1]);
2642		bus->ctrl_frame_stat = false;
2643		brcmf_sdio_wait_event_wakeup(bus);
2644	}
2645	/* Send queued frames (limit 1 if rx may still be pending) */
2646	else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2647		 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
2648		 && data_ok(bus)) {
2649		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2650					    txlimit;
2651		framecnt = brcmf_sdio_sendfromq(bus, framecnt);
2652		txlimit -= framecnt;
2653	}
2654
2655	if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
2656		brcmf_err("failed backplane access over SDIO, halting operation\n");
2657		atomic_set(&bus->intstatus, 0);
2658	} else if (atomic_read(&bus->intstatus) ||
2659		   atomic_read(&bus->ipend) > 0 ||
2660		   (!atomic_read(&bus->fcstate) &&
2661		    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2662		    data_ok(bus)) || PKT_AVAILABLE()) {
2663		atomic_inc(&bus->dpc_tskcnt);
2664	}
2665
2666	/* If we're done for now, turn off clock request. */
2667	if ((bus->clkstate != CLK_PENDING)
2668	    && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2669		bus->activity = false;
2670		brcmf_dbg(SDIO, "idle state\n");
2671		sdio_claim_host(bus->sdiodev->func[1]);
2672		brcmf_sdio_bus_sleep(bus, true, false);
2673		sdio_release_host(bus->sdiodev->func[1]);
2674	}
2675}
2676
2677static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
2678{
2679	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2680	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2681	struct brcmf_sdio *bus = sdiodev->bus;
2682
2683	return &bus->txq;
2684}
2685
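/* Bus txdata callback: reserve room for the bus header, enqueue the packet
 * on the precedence queue and schedule the DPC. Asserts flow control towards
 * the stack when the queue length reaches TXHI.
 */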
2686static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
2687{
2688	int ret = -EBADE;
2689	uint datalen, prec;
2690	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2691	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2692	struct brcmf_sdio *bus = sdiodev->bus;
2693	ulong flags;
2694
2695	brcmf_dbg(TRACE, "Enter\n");
2696
2697	datalen = pkt->len;
2698
2699	/* Add space for the header */
2700	skb_push(pkt, bus->tx_hdrlen);
2701	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2702
2703	prec = prio2prec((pkt->priority & PRIOMASK));
2704
2705	/* Check for existing queue, current flow-control,
2706	 * pending event, or pending clock */
2707	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2708	bus->sdcnt.fcqueued++;
2709
2710	/* Priority based enq */
2711	spin_lock_irqsave(&bus->txqlock, flags);
2712	/* reset bus_flags in packet cb */
2713	*(u16 *)(pkt->cb) = 0;
2714	if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2715		skb_pull(pkt, bus->tx_hdrlen);
2716		brcmf_err("out of bus->txq !!!\n");
2717		ret = -ENOSR;
2718	} else {
2719		ret = 0;
2720	}
2721
2722	if (pktq_len(&bus->txq) >= TXHI) {
2723		bus->txoff = true;
2724		brcmf_txflowblock(bus->sdiodev->dev, true);
2725	}
2726	spin_unlock_irqrestore(&bus->txqlock, flags);
2727
2728#ifdef DEBUG
2729	if (pktq_plen(&bus->txq, prec) > qcount[prec])
2730		qcount[prec] = pktq_plen(&bus->txq, prec);
2731#endif
2732
2733	if (atomic_read(&bus->dpc_tskcnt) == 0) {
2734		atomic_inc(&bus->dpc_tskcnt);
2735		queue_work(bus->brcmf_wq, &bus->datawork);
2736	}
2737
2738	return ret;
2739}
2740
2741#ifdef DEBUG
2742#define CONSOLE_LINE_MAX	192
2743
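/* Copy new firmware console output from device memory and print each
 * complete line via pr_debug().
 */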
2744static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
2745{
2746	struct brcmf_console *c = &bus->console;
2747	u8 line[CONSOLE_LINE_MAX], ch;
2748	u32 n, idx, addr;
2749	int rv;
2750
2751	/* Don't do anything until FWREADY updates console address */
2752	if (bus->console_addr == 0)
2753		return 0;
2754
2755	/* Read console log struct */
2756	addr = bus->console_addr + offsetof(struct rte_console, log_le);
2757	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2758			       sizeof(c->log_le));
2759	if (rv < 0)
2760		return rv;
2761
2762	/* Allocate console buffer (one time only) */
2763	if (c->buf == NULL) {
2764		c->bufsize = le32_to_cpu(c->log_le.buf_size);
2765		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2766		if (c->buf == NULL)
2767			return -ENOMEM;
2768	}
2769
2770	idx = le32_to_cpu(c->log_le.idx);
2771
2772	/* Protect against corrupt value */
2773	if (idx > c->bufsize)
2774		return -EBADE;
2775
2776	/* Skip reading the console buffer if the index pointer
2777	 * has not moved */
2778	if (idx == c->last)
2779		return 0;
2780
2781	/* Read the console buffer */
2782	addr = le32_to_cpu(c->log_le.buf);
2783	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2784	if (rv < 0)
2785		return rv;
2786
2787	while (c->last != idx) {
2788		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2789			if (c->last == idx) {
2790				/* This would output a partial line.
2791				 * Instead, back up
2792				 * the buffer pointer and output this
2793				 * line next time around.
2794				 */
2795				if (c->last >= n)
2796					c->last -= n;
2797				else
2798					c->last = c->bufsize - n;
2799				goto break2;
2800			}
2801			ch = c->buf[c->last];
2802			c->last = (c->last + 1) % c->bufsize;
2803			if (ch == '\n')
2804				break;
2805			line[n] = ch;
2806		}
2807
2808		if (n > 0) {
2809			if (line[n - 1] == '\r')
2810				n--;
2811			line[n] = 0;
2812			pr_debug("CONSOLE: %s\n", line);
2813		}
2814	}
2815break2:
2816
2817	return 0;
2818}
2819#endif				/* DEBUG */
2820
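/* Send a single control frame over F2, called with the SDIO host claimed.
 * On error the transfer is aborted, the frame is terminated in the device
 * and the write-frame byte counters are polled until they clear.
 */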
2821static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2822{
2823	int i;
2824	int ret;
2825
2826	bus->ctrl_frame_stat = false;
2827	ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
2828
2829	if (ret < 0) {
2830		/* On failure, abort the command and terminate the frame */
2831		brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2832			  ret);
2833		bus->sdcnt.tx_sderrs++;
2834
2835		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
2836
2837		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2838				  SFC_WF_TERM, NULL);
2839		bus->sdcnt.f1regdata++;
2840
2841		for (i = 0; i < 3; i++) {
2842			u8 hi, lo;
2843			hi = brcmf_sdiod_regrb(bus->sdiodev,
2844					       SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2845			lo = brcmf_sdiod_regrb(bus->sdiodev,
2846					       SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2847			bus->sdcnt.f1regdata += 2;
2848			if (hi == 0 && lo == 0)
2849				break;
2850		}
2851		return ret;
2852	}
2853
2854	bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2855
2856	return ret;
2857}
2858
2859static int
2860brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2861{
2862	u8 *frame;
2863	u16 len, pad;
2864	uint retries = 0;
2865	u8 doff = 0;
2866	int ret = -1;
2867	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2868	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2869	struct brcmf_sdio *bus = sdiodev->bus;
2870	struct brcmf_sdio_hdrinfo hd_info = {0};
2871
2872	brcmf_dbg(TRACE, "Enter\n");
2873
2874	/* Back the pointer to make room for the bus header */
2875	frame = msg - bus->tx_hdrlen;
2876	len = (msglen += bus->tx_hdrlen);
2877
2878	/* Add alignment padding (optional for ctl frames) */
2879	doff = ((unsigned long)frame % bus->head_align);
2880	if (doff) {
2881		frame -= doff;
2882		len += doff;
2883		msglen += doff;
2884		memset(frame, 0, doff + bus->tx_hdrlen);
2885	}
2886	/* precondition: doff < bus->head_align */
2887	doff += bus->tx_hdrlen;
2888
2889	/* Round send length to next SDIO block */
2890	pad = 0;
2891	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2892		pad = bus->blocksize - (len % bus->blocksize);
2893		if ((pad > bus->roundup) || (pad >= bus->blocksize))
2894			pad = 0;
2895	} else if (len % bus->head_align) {
2896		pad = bus->head_align - (len % bus->head_align);
2897	}
2898	len += pad;
2899
2900	/* precondition: IS_ALIGNED((unsigned long)frame, 2) */
2901
2902	/* Make sure backplane clock is on */
2903	sdio_claim_host(bus->sdiodev->func[1]);
2904	brcmf_sdio_bus_sleep(bus, false, false);
2905	sdio_release_host(bus->sdiodev->func[1]);
2906
2907	hd_info.len = (u16)msglen;
2908	hd_info.channel = SDPCM_CONTROL_CHANNEL;
2909	hd_info.dat_offset = doff;
2910	hd_info.seq_num = bus->tx_seq;
2911	hd_info.lastfrm = true;
2912	hd_info.tail_pad = pad;
2913	brcmf_sdio_hdpack(bus, frame, &hd_info);
2914
2915	if (bus->txglom)
2916		brcmf_sdio_update_hwhdr(frame, len);
2917
2918	if (!data_ok(bus)) {
2919		brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
2920			  bus->tx_max, bus->tx_seq);
2921		bus->ctrl_frame_stat = true;
2922		/* Send from dpc */
2923		bus->ctrl_frame_buf = frame;
2924		bus->ctrl_frame_len = len;
2925
2926		wait_event_interruptible_timeout(bus->ctrl_wait,
2927						 !bus->ctrl_frame_stat,
2928						 msecs_to_jiffies(2000));
2929
2930		if (!bus->ctrl_frame_stat) {
2931			brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
2932			ret = 0;
2933		} else {
2934			brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
2935			ret = -1;
2936		}
2937	}
2938
2939	if (ret == -1) {
2940		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2941				   frame, len, "Tx Frame:\n");
2942		brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2943				   BRCMF_HDRS_ON(),
2944				   frame, min_t(u16, len, 16), "TxHdr:\n");
2945
2946		do {
2947			sdio_claim_host(bus->sdiodev->func[1]);
2948			ret = brcmf_sdio_tx_frame(bus, frame, len);
2949			sdio_release_host(bus->sdiodev->func[1]);
2950		} while (ret < 0 && retries++ < TXRETRIES);
2951	}
2952
2953	if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2954	    atomic_read(&bus->dpc_tskcnt) == 0) {
2955		bus->activity = false;
2956		sdio_claim_host(bus->sdiodev->func[1]);
2957		brcmf_dbg(INFO, "idle\n");
2958		brcmf_sdio_clkctl(bus, CLK_NONE, true);
2959		sdio_release_host(bus->sdiodev->func[1]);
2960	}
2961
2962	if (ret)
2963		bus->sdcnt.tx_ctlerrs++;
2964	else
2965		bus->sdcnt.tx_ctlpkts++;
2966
2967	return ret ? -EIO : 0;
2968}
2969
2970#ifdef DEBUG
2971static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2972				   struct sdpcm_shared *sh, char __user *data,
2973				   size_t count)
2974{
2975	u32 addr, console_ptr, console_size, console_index;
2976	char *conbuf = NULL;
2977	__le32 sh_val;
2978	int rv;
2979	loff_t pos = 0;
2980	int nbytes = 0;
2981
2982	/* obtain console information from device memory */
2983	addr = sh->console_addr + offsetof(struct rte_console, log_le);
2984	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
2985			       (u8 *)&sh_val, sizeof(u32));
2986	if (rv < 0)
2987		return rv;
2988	console_ptr = le32_to_cpu(sh_val);
2989
2990	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2991	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
2992			       (u8 *)&sh_val, sizeof(u32));
2993	if (rv < 0)
2994		return rv;
2995	console_size = le32_to_cpu(sh_val);
2996
2997	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
2998	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
2999			       (u8 *)&sh_val, sizeof(u32));
3000	if (rv < 0)
3001		return rv;
3002	console_index = le32_to_cpu(sh_val);
3003
3004	/* allocate buffer for console data */
3005	if (console_size <= CONSOLE_BUFFER_MAX)
3006		conbuf = vzalloc(console_size+1);
3007
3008	if (!conbuf)
3009		return -ENOMEM;
3010
3011	/* obtain the console data from device */
3012	conbuf[console_size] = '\0';
3013	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
3014			       console_size);
3015	if (rv < 0)
3016		goto done;
3017
3018	rv = simple_read_from_buffer(data, count, &pos,
3019				     conbuf + console_index,
3020				     console_size - console_index);
3021	if (rv < 0)
3022		goto done;
3023
3024	nbytes = rv;
3025	if (console_index > 0) {
3026		pos = 0;
3027		rv = simple_read_from_buffer(data+nbytes, count, &pos,
3028					     conbuf, console_index - 1);
3029		if (rv < 0)
3030			goto done;
3031		rv += nbytes;
3032	}
3033done:
3034	vfree(conbuf);
3035	return rv;
3036}
3037
3038static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
3039				char __user *data, size_t count)
3040{
3041	int error, res;
3042	char buf[350];
3043	struct brcmf_trap_info tr;
3044	loff_t pos = 0;
3045
3046	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
3047		brcmf_dbg(INFO, "no trap in firmware\n");
3048		return 0;
3049	}
3050
3051	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
3052				  sizeof(struct brcmf_trap_info));
3053	if (error < 0)
3054		return error;
3055
3056	res = scnprintf(buf, sizeof(buf),
3057			"dongle trap info: type 0x%x @ epc 0x%08x\n"
3058			"  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
3059			"  lr   0x%08x pc   0x%08x offset 0x%x\n"
3060			"  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
3061			"  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
3062			le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
3063			le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
3064			le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
3065			le32_to_cpu(tr.pc), sh->trap_addr,
3066			le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
3067			le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
3068			le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
3069			le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
3070
3071	return simple_read_from_buffer(data, count, &pos, buf, res);
3072}
3073
3074static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
3075				  struct sdpcm_shared *sh, char __user *data,
3076				  size_t count)
3077{
3078	int error = 0;
3079	char buf[200];
3080	char file[80] = "?";
3081	char expr[80] = "<???>";
3082	int res;
3083	loff_t pos = 0;
3084
3085	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
3086		brcmf_dbg(INFO, "firmware not built with -assert\n");
3087		return 0;
3088	} else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
3089		brcmf_dbg(INFO, "no assert in dongle\n");
3090		return 0;
3091	}
3092
3093	sdio_claim_host(bus->sdiodev->func[1]);
3094	if (sh->assert_file_addr != 0) {
3095		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
3096					  sh->assert_file_addr, (u8 *)file, 80);
3097		if (error < 0)
3098			return error;
3099	}
3100	if (sh->assert_exp_addr != 0) {
3101		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
3102					  sh->assert_exp_addr, (u8 *)expr, 80);
3103		if (error < 0)
3104			return error;
3105	}
3106	sdio_release_host(bus->sdiodev->func[1]);
3107
3108	res = scnprintf(buf, sizeof(buf),
3109			"dongle assert: %s:%d: assert(%s)\n",
3110			file, sh->assert_line, expr);
3111	return simple_read_from_buffer(data, count, &pos, buf, res);
3112}
3113
3114static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
3115{
3116	int error;
3117	struct sdpcm_shared sh;
3118
3119	error = brcmf_sdio_readshared(bus, &sh);
3120
3121	if (error < 0)
3122		return error;
3123
3124	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
3125		brcmf_dbg(INFO, "firmware not built with -assert\n");
3126	else if (sh.flags & SDPCM_SHARED_ASSERT)
3127		brcmf_err("assertion in dongle\n");
3128
3129	if (sh.flags & SDPCM_SHARED_TRAP)
3130		brcmf_err("firmware trap in dongle\n");
3131
3132	return 0;
3133}
3134
3135static int brcmf_sdio_died_dump(struct brcmf_sdio *bus, char __user *data,
3136				size_t count, loff_t *ppos)
3137{
3138	int error = 0;
3139	struct sdpcm_shared sh;
3140	int nbytes = 0;
3141	loff_t pos = *ppos;
3142
3143	if (pos != 0)
3144		return 0;
3145
3146	error = brcmf_sdio_readshared(bus, &sh);
3147	if (error < 0)
3148		goto done;
3149
3150	error = brcmf_sdio_assert_info(bus, &sh, data, count);
3151	if (error < 0)
3152		goto done;
3153	nbytes = error;
3154
3155	error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
3156	if (error < 0)
3157		goto done;
3158	nbytes += error;
3159
3160	error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
3161	if (error < 0)
3162		goto done;
3163	nbytes += error;
3164
3165	error = nbytes;
3166	*ppos += nbytes;
3167done:
3168	return error;
3169}
3170
3171static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
3172					size_t count, loff_t *ppos)
3173{
3174	struct brcmf_sdio *bus = f->private_data;
3175	int res;
3176
3177	res = brcmf_sdio_died_dump(bus, data, count, ppos);
3180	return (ssize_t)res;
3181}
3182
3183static const struct file_operations brcmf_sdio_forensic_ops = {
3184	.owner = THIS_MODULE,
3185	.open = simple_open,
3186	.read = brcmf_sdio_forensic_read
3187};
3188
3189static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3190{
3191	struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
3192	struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
3193
3194	if (IS_ERR_OR_NULL(dentry))
3195		return;
3196
3197	debugfs_create_file("forensics", S_IRUGO, dentry, bus,
3198			    &brcmf_sdio_forensic_ops);
3199	brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
3200	debugfs_create_u32("console_interval", 0644, dentry,
3201			   &bus->console_interval);
3202}
3203#else
3204static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
3205{
3206	return 0;
3207}
3208
3209static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3210{
3211}
3212#endif /* DEBUG */
3213
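/* Bus rxctl callback: wait for a control frame published by
 * brcmf_sdio_read_control() and copy it into msg. Returns the received
 * length or a negative errno on timeout or interruption.
 */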
3214static int
3215brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3216{
3217	int timeleft;
3218	uint rxlen = 0;
3219	bool pending;
3220	u8 *buf;
3221	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3222	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3223	struct brcmf_sdio *bus = sdiodev->bus;
3224
3225	brcmf_dbg(TRACE, "Enter\n");
3226
3227	/* Wait until control frame is available */
3228	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
3229
3230	spin_lock_bh(&bus->rxctl_lock);
3231	rxlen = bus->rxlen;
3232	memcpy(msg, bus->rxctl, min(msglen, rxlen));
3233	bus->rxctl = NULL;
3234	buf = bus->rxctl_orig;
3235	bus->rxctl_orig = NULL;
3236	bus->rxlen = 0;
3237	spin_unlock_bh(&bus->rxctl_lock);
3238	vfree(buf);
3239
3240	if (rxlen) {
3241		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
3242			  rxlen, msglen);
3243	} else if (timeleft == 0) {
3244		brcmf_err("resumed on timeout\n");
3245		brcmf_sdio_checkdied(bus);
3246	} else if (pending) {
3247		brcmf_dbg(CTL, "cancelled\n");
3248		return -ERESTARTSYS;
3249	} else {
3250		brcmf_dbg(CTL, "resumed for unknown reason?\n");
3251		brcmf_sdio_checkdied(bus);
3252	}
3253
3254	if (rxlen)
3255		bus->sdcnt.rx_ctlpkts++;
3256	else
3257		bus->sdcnt.rx_ctlerrs++;
3258
3259	return rxlen ? (int)rxlen : -ETIMEDOUT;
3260}
3261
3262#ifdef DEBUG
3263static bool
3264brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
3265			u8 *ram_data, uint ram_sz)
3266{
3267	char *ram_cmp;
3268	int err;
3269	bool ret = true;
3270	int address;
3271	int offset;
3272	int len;
3273
3274	/* read back and verify */
3275	brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
3276		  ram_sz);
3277	ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
3278	/* if the compare buffer cannot be allocated, skip verification */
3279	if (!ram_cmp)
3280		return true;
3281
3282	address = ram_addr;
3283	offset = 0;
3284	while (offset < ram_sz) {
3285		len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
3286		      ram_sz - offset;
3287		err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
3288		if (err) {
3289			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
3290				  err, len, address);
3291			ret = false;
3292			break;
3293		} else if (memcmp(ram_cmp, &ram_data[offset], len)) {
3294			brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
3295				  offset, len);
3296			ret = false;
3297			break;
3298		}
3299		offset += len;
3300		address += len;
3301	}
3302
3303	kfree(ram_cmp);
3304
3305	return ret;
3306}
3307#else	/* DEBUG */
3308static bool
3309brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
3310			u8 *ram_data, uint ram_sz)
3311{
3312	return true;
3313}
3314#endif	/* DEBUG */
3315
3316static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
3317					 const struct firmware *fw)
3318{
3319	int err;
3320
3321	brcmf_dbg(TRACE, "Enter\n");
3322
3323	err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
3324				(u8 *)fw->data, fw->size);
3325	if (err)
3326		brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3327			  err, (int)fw->size, bus->ci->rambase);
3328	else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
3329					  (u8 *)fw->data, fw->size))
3330		err = -EIO;
3331
3332	return err;
3333}
3334
3335static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
3336				     const struct firmware *nv)
3337{
3338	void *vars;
3339	u32 varsz;
3340	int address;
3341	int err;
3342
3343	brcmf_dbg(TRACE, "Enter\n");
3344
3345	vars = brcmf_nvram_strip(nv, &varsz);
3346
3347	if (vars == NULL)
3348		return -EINVAL;
3349
3350	address = bus->ci->ramsize - varsz + bus->ci->rambase;
3351	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
3352	if (err)
3353		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
3354			  err, varsz, address);
3355	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
3356		err = -EIO;
3357
3358	brcmf_nvram_free(vars);
3359
3360	return err;
3361}
3362
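/* Download the firmware image and NVRAM to the device: hold the ARM core in
 * reset, write the image at rambase and the stripped NVRAM at the top of
 * RAM, then release the ARM using the reset vector from the image.
 */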
3363static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
3364{
3365	int bcmerror = -EFAULT;
3366	const struct firmware *fw;
3367	u32 rstvec;
3368
3369	sdio_claim_host(bus->sdiodev->func[1]);
3370	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
3371
3372	/* Keep arm in reset */
3373	brcmf_chip_enter_download(bus->ci);
3374
3375	fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
3376	if (fw == NULL) {
3377		bcmerror = -ENOENT;
3378		goto err;
3379	}
3380
3381	rstvec = get_unaligned_le32(fw->data);
3382	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
3383
3384	bcmerror = brcmf_sdio_download_code_file(bus, fw);
3385	release_firmware(fw);
3386	if (bcmerror) {
3387		brcmf_err("dongle image file download failed\n");
3388		goto err;
3389	}
3390
3391	fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
3392	if (fw == NULL) {
3393		bcmerror = -ENOENT;
3394		goto err;
3395	}
3396
3397	bcmerror = brcmf_sdio_download_nvram(bus, fw);
3398	release_firmware(fw);
3399	if (bcmerror) {
3400		brcmf_err("dongle nvram file download failed\n");
3401		goto err;
3402	}
3403
3404	/* Take arm out of reset */
3405	if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
3406		brcmf_err("error getting out of ARM core reset\n");
3407		goto err;
3408	}
3409
3410	/* Allow HT Clock now that the ARM is running. */
3411	brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_LOAD);
3412	bcmerror = 0;
3413
3414err:
3415	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
3416	sdio_release_host(bus->sdiodev->func[1]);
3417	return bcmerror;
3418}
3419
3420static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
3421{
3422	int err = 0;
3423	u8 val;
3424
3425	brcmf_dbg(TRACE, "Enter\n");
3426
3427	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
3428	if (err) {
3429		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3430		return;
3431	}
3432
3433	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
3434	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
3435	if (err) {
3436		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3437		return;
3438	}
3439
3440	/* Add CMD14 Support */
3441	brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
3442			  (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
3443			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
3444			  &err);
3445	if (err) {
3446		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3447		return;
3448	}
3449
3450	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3451			  SBSDIO_FORCE_HT, &err);
3452	if (err) {
3453		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3454		return;
3455	}
3456
3457	/* set flag */
3458	bus->sr_enabled = true;
3459	brcmf_dbg(INFO, "SR enabled\n");
3460}
3461
3462/* enable KSO bit */
3463static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
3464{
3465	u8 val;
3466	int err = 0;
3467
3468	brcmf_dbg(TRACE, "Enter\n");
3469
3470	/* KSO bit added in SDIO core rev 12 */
3471	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
3472		return 0;
3473
3474	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
3475	if (err) {
3476		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3477		return err;
3478	}
3479
3480	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
3481		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
3482			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
3483		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3484				  val, &err);
3485		if (err) {
3486			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3487			return err;
3488		}
3489	}
3490
3491	return 0;
3492}
3493
3494
3495static int brcmf_sdio_bus_preinit(struct device *dev)
3496{
3497	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3498	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3499	struct brcmf_sdio *bus = sdiodev->bus;
3500	uint pad_size;
3501	u32 value;
3502	int err;
3503
3504	/* the commands below use the terms tx and rx from
3505	 * a device perspective, i.e. bus:txglom affects the
3506	 * bus transfers from device to host.
3507	 */
3508	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
3509		/* for sdio core rev < 12, disable txgloming */
3510		value = 0;
3511		err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
3512					   sizeof(u32));
3513	} else {
3514		/* otherwise, set txglomalign */
3515		value = 4;
3516		if (sdiodev->pdata)
3517			value = sdiodev->pdata->sd_sgentry_align;
3518		/* SDIO ADMA requires at least 32 bit alignment */
3519		value = max_t(u32, value, 4);
3520		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
3521					   sizeof(u32));
3522	}
3523
3524	if (err < 0)
3525		goto done;
3526
3527	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
3528	if (sdiodev->sg_support) {
3529		bus->txglom = false;
3530		value = 1;
3531		pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
3532		bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
3533		if (!bus->txglom_sgpad)
3534			brcmf_err("allocating txglom padding skb failed, reduced performance\n");
3535
3536		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
3537					   &value, sizeof(u32));
3538		if (err < 0) {
3539			/* bus:rxglom is allowed to fail */
3540			err = 0;
3541		} else {
3542			bus->txglom = true;
3543			bus->tx_hdrlen += SDPCM_HWEXT_LEN;
3544		}
3545	}
3546	brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);
3547
3548done:
3549	return err;
3550}
3551
3552static int brcmf_sdio_bus_init(struct device *dev)
3553{
3554	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3555	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3556	struct brcmf_sdio *bus = sdiodev->bus;
3557	int err, ret = 0;
3558	u8 saveclk;
3559
3560	brcmf_dbg(TRACE, "Enter\n");
3561
3562	/* try to download image and nvram to the dongle */
3563	if (bus_if->state == BRCMF_BUS_DOWN) {
3564		bus->alp_only = true;
3565		err = brcmf_sdio_download_firmware(bus);
3566		if (err)
3567			return err;
3568		bus->alp_only = false;
3569	}
3570
3571	if (!bus->sdiodev->bus_if->drvr)
3572		return 0;
3573
3574	/* Start the watchdog timer */
3575	bus->sdcnt.tickcnt = 0;
3576	brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
3577
3578	sdio_claim_host(bus->sdiodev->func[1]);
3579
3580	/* Make sure backplane clock is on, needed to generate F2 interrupt */
3581	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
3582	if (bus->clkstate != CLK_AVAIL)
3583		goto exit;
3584
3585	/* Force clocks on backplane to be sure F2 interrupt propagates */
3586	saveclk = brcmf_sdiod_regrb(bus->sdiodev,
3587				    SBSDIO_FUNC1_CHIPCLKCSR, &err);
3588	if (!err) {
3589		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3590				  (saveclk | SBSDIO_FORCE_HT), &err);
3591	}
3592	if (err) {
3593		brcmf_err("Failed to force clock for F2: err %d\n", err);
3594		goto exit;
3595	}
3596
3597	/* Enable function 2 (frame transfers) */
3598	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
3599		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
3600	err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
3601
3602
3603	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
3604
3605	/* If F2 successfully enabled, set core and enable interrupts */
3606	if (!err) {
3607		/* Set up the interrupt mask and enable interrupts */
3608		bus->hostintmask = HOSTINTMASK;
3609		w_sdreg32(bus, bus->hostintmask,
3610			  offsetof(struct sdpcmd_regs, hostintmask));
3611
3612		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
3613	} else {
3614		/* Disable F2 again */
3615		sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
3616		ret = -ENODEV;
3617	}
3618
3619	if (brcmf_chip_sr_capable(bus->ci)) {
3620		brcmf_sdio_sr_init(bus);
3621	} else {
3622		/* Restore previous clock setting */
3623		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3624				  saveclk, &err);
3625	}
3626
3627	if (ret == 0) {
3628		ret = brcmf_sdiod_intr_register(bus->sdiodev);
3629		if (ret != 0)
3630			brcmf_err("intr register failed:%d\n", ret);
3631	}
3632
3633	/* If we didn't come up, turn off backplane clock */
3634	if (ret != 0)
3635		brcmf_sdio_clkctl(bus, CLK_NONE, false);
3636
3637exit:
3638	sdio_release_host(bus->sdiodev->func[1]);
3639
3640	return ret;
3641}
3642
3643void brcmf_sdio_isr(struct brcmf_sdio *bus)
3644{
3645	brcmf_dbg(TRACE, "Enter\n");
3646
3647	if (!bus) {
3648		brcmf_err("bus is null pointer, exiting\n");
3649		return;
3650	}
3651
3652	if (!brcmf_bus_ready(bus->sdiodev->bus_if)) {
3653		brcmf_err("bus is down. we have nothing to do\n");
3654		return;
3655	}
3656	/* Count the interrupt call */
3657	bus->sdcnt.intrcount++;
3658	if (in_interrupt())
3659		atomic_set(&bus->ipend, 1);
3660	else
3661		if (brcmf_sdio_intr_rstatus(bus)) {
3662			brcmf_err("failed backplane access\n");
3663		}
3664
3665	/* Disable additional interrupts (is this needed now?) */
3666	if (!bus->intr)
3667		brcmf_err("isr w/o interrupt configured!\n");
3668
3669	atomic_inc(&bus->dpc_tskcnt);
3670	queue_work(bus->brcmf_wq, &bus->datawork);
3671}
3672
3673static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3674{
3675#ifdef DEBUG
3676	struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3677#endif	/* DEBUG */
3678
3679	brcmf_dbg(TIMER, "Enter\n");
3680
3681	/* Poll period: check device if appropriate. */
3682	if (!bus->sr_enabled &&
3683	    bus->poll && (++bus->polltick >= bus->pollrate)) {
3684		u32 intstatus = 0;
3685
3686		/* Reset poll tick */
3687		bus->polltick = 0;
3688
3689		/* Check device if no interrupts */
3690		if (!bus->intr ||
3691		    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3692
3693			if (atomic_read(&bus->dpc_tskcnt) == 0) {
3694				u8 devpend;
3695
3696				sdio_claim_host(bus->sdiodev->func[1]);
3697				devpend = brcmf_sdiod_regrb(bus->sdiodev,
3698							    SDIO_CCCR_INTx,
3699							    NULL);
3700				sdio_release_host(bus->sdiodev->func[1]);
3701				intstatus =
3702				    devpend & (INTR_STATUS_FUNC1 |
3703					       INTR_STATUS_FUNC2);
3704			}
3705
3706			/* If there is something, make like the ISR and
3707			 * schedule the DPC */
3708			if (intstatus) {
3709				bus->sdcnt.pollcnt++;
3710				atomic_set(&bus->ipend, 1);
3711
3712				atomic_inc(&bus->dpc_tskcnt);
3713				queue_work(bus->brcmf_wq, &bus->datawork);
3714			}
3715		}
3716
3717		/* Update interrupt tracking */
3718		bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3719	}
3720#ifdef DEBUG
3721	/* Poll for console output periodically */
3722	if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
3723	    bus->console_interval != 0) {
3724		bus->console.count += BRCMF_WD_POLL_MS;
3725		if (bus->console.count >= bus->console_interval) {
3726			bus->console.count -= bus->console_interval;
3727			sdio_claim_host(bus->sdiodev->func[1]);
3728			/* Make sure backplane clock is on */
3729			brcmf_sdio_bus_sleep(bus, false, false);
3730			if (brcmf_sdio_readconsole(bus) < 0)
3731				/* stop on error */
3732				bus->console_interval = 0;
3733			sdio_release_host(bus->sdiodev->func[1]);
3734		}
3735	}
3736#endif				/* DEBUG */
3737
3738	/* On idle timeout clear activity flag and/or turn off clock */
3739	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
3740		if (++bus->idlecount >= bus->idletime) {
3741			bus->idlecount = 0;
3742			if (bus->activity) {
3743				bus->activity = false;
3744				brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
3745			} else {
3746				brcmf_dbg(SDIO, "idle\n");
3747				sdio_claim_host(bus->sdiodev->func[1]);
3748				brcmf_sdio_bus_sleep(bus, true, false);
3749				sdio_release_host(bus->sdiodev->func[1]);
3750			}
3751		}
3752	}
3753
3754	return (atomic_read(&bus->ipend) > 0);
3755}
3756
3757static void brcmf_sdio_dataworker(struct work_struct *work)
3758{
3759	struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3760					      datawork);
3761
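	/* Run the DPC once for every request counted in dpc_tskcnt */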
3762	while (atomic_read(&bus->dpc_tskcnt)) {
3763		brcmf_sdio_dpc(bus);
3764		atomic_dec(&bus->dpc_tskcnt);
3765	}
3766}
3767
3768static void
3769brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
3770			     struct brcmf_chip *ci, u32 drivestrength)
3771{
3772	const struct sdiod_drive_str *str_tab = NULL;
3773	u32 str_mask;
3774	u32 str_shift;
3775	u32 base;
3776	u32 i;
3777	u32 drivestrength_sel = 0;
3778	u32 cc_data_temp;
3779	u32 addr;
3780
3781	if (!(ci->cc_caps & CC_CAP_PMU))
3782		return;
3783
3784	switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
3785	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
3786		str_tab = sdiod_drvstr_tab1_1v8;
3787		str_mask = 0x00003800;
3788		str_shift = 11;
3789		break;
3790	case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
3791		str_tab = sdiod_drvstr_tab6_1v8;
3792		str_mask = 0x00001800;
3793		str_shift = 11;
3794		break;
3795	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
3796		/* note: 43143 does not support tristate */
3797		i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
3798		if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
3799			str_tab = sdiod_drvstr_tab2_3v3;
3800			str_mask = 0x00000007;
3801			str_shift = 0;
3802		} else
3803			brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
3804				  ci->name, drivestrength);
3805		break;
3806	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
3807		str_tab = sdiod_drive_strength_tab5_1v8;
3808		str_mask = 0x00003800;
3809		str_shift = 11;
3810		break;
3811	default:
3812		brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
3813			  ci->name, ci->chiprev, ci->pmurev);
3814		break;
3815	}
3816
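	/* Pick the first table entry whose strength does not exceed the
	 * requested value and program it through the chipcommon chipcontrol
	 * register.
	 */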
3817	if (str_tab != NULL) {
3818		for (i = 0; str_tab[i].strength != 0; i++) {
3819			if (drivestrength >= str_tab[i].strength) {
3820				drivestrength_sel = str_tab[i].sel;
3821				break;
3822			}
3823		}
3824		base = brcmf_chip_get_chipcommon(ci)->base;
3825		addr = CORE_CC_REG(base, chipcontrol_addr);
3826		brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
3827		cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
3828		cc_data_temp &= ~str_mask;
3829		drivestrength_sel <<= str_shift;
3830		cc_data_temp |= drivestrength_sel;
3831		brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
3832
3833		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
3834			  str_tab[i].strength, drivestrength, cc_data_temp);
3835	}
3836}
3837
3838static int brcmf_sdio_buscoreprep(void *ctx)
3839{
3840	struct brcmf_sdio_dev *sdiodev = ctx;
3841	int err = 0;
3842	u8 clkval, clkset;
3843
3844	/* Try forcing SDIO core to do ALPAvail request only */
3845	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
3846	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
3847	if (err) {
3848		brcmf_err("error writing for HT off\n");
3849		return err;
3850	}
3851
3852	/* If register supported, wait for ALPAvail and then force ALP */
3853	/* This may take up to 15 milliseconds */
3854	clkval = brcmf_sdiod_regrb(sdiodev,
3855				   SBSDIO_FUNC1_CHIPCLKCSR, NULL);
3856
3857	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
3858		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
3859			  clkset, clkval);
3860		return -EACCES;
3861	}
3862
3863	SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
3864					      SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
3865			!SBSDIO_ALPAV(clkval)),
3866			PMU_MAX_TRANSITION_DLY);
3867	if (!SBSDIO_ALPAV(clkval)) {
3868		brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
3869			  clkval);
3870		return -EBUSY;
3871	}
3872
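	/* ALP is available; force it on and give the clock time to settle */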
3873	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
3874	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
3875	udelay(65);
3876
3877	/* Also, disable the extra SDIO pull-ups */
3878	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
3879
3880	return 0;
3881}
3882
3883static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
3884				      u32 rstvec)
3885{
3886	struct brcmf_sdio_dev *sdiodev = ctx;
3887	struct brcmf_core *core;
3888	u32 reg_addr;
3889
3890	/* clear all interrupts */
3891	core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
3892	reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
3893	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
3894
3895	if (rstvec)
3896		/* Write reset vector to address 0 */
3897		brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
3898				  sizeof(rstvec));
3899}
3900
3901static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
3902{
3903	struct brcmf_sdio_dev *sdiodev = ctx;
3904	u32 val, rev;
3905
3906	val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
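	/* The 4335 and 4339 share an SDIO device ID; treat revision >= 2
	 * of that device as a BCM4339.
	 */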
3907	if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
3908	    addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
3909		rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
3910		if (rev >= 2) {
3911			val &= ~CID_ID_MASK;
3912			val |= BCM4339_CHIP_ID;
3913		}
3914	}
3915	return val;
3916}
3917
3918static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
3919{
3920	struct brcmf_sdio_dev *sdiodev = ctx;
3921
3922	brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
3923}
3924
3925static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
3926	.prepare = brcmf_sdio_buscoreprep,
3927	.exit_dl = brcmf_sdio_buscore_exitdl,
3928	.read32 = brcmf_sdio_buscore_read32,
3929	.write32 = brcmf_sdio_buscore_write32,
3930};
3931
3932static bool
3933brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3934{
3935	u8 clkctl = 0;
3936	int err = 0;
3937	int reg_addr;
3938	u32 reg_val;
3939	u32 drivestrength;
3940
3941	sdio_claim_host(bus->sdiodev->func[1]);
3942
3943	pr_debug("F1 signature read @0x18000000=0x%4x\n",
3944		 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3945
3946	/*
3947	 * Force PLL off until brcmf_chip_attach()
3948	 * programs PLL control regs
3949	 */
3950
3951	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3952			  BRCMF_INIT_CLKCTL1, &err);
3953	if (!err)
3954		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
3955					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
3956
3957	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3958		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3959			  err, BRCMF_INIT_CLKCTL1, clkctl);
3960		goto fail;
3961	}
3962
3963	/* SDIO register access works, so move the bus
3964	 * state from UNKNOWN to DOWN.
3965	 */
3966	brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
3967
3968	bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
3969	if (IS_ERR(bus->ci)) {
3970		brcmf_err("brcmf_chip_attach failed!\n");
3971		bus->ci = NULL;
3972		goto fail;
3973	}
3974
3975	if (brcmf_sdio_kso_init(bus)) {
3976		brcmf_err("error enabling KSO\n");
3977		goto fail;
3978	}
3979
3980	if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
3981		drivestrength = bus->sdiodev->pdata->drive_strength;
3982	else
3983		drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3984	brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3985
3986	/* Get info on the SOCRAM cores... */
3987	bus->ramsize = bus->ci->ramsize;
3988	if (!(bus->ramsize)) {
3989		brcmf_err("failed to find SOCRAM memory!\n");
3990		goto fail;
3991	}
3992
3993	/* Set card control so an SDIO card reset does a WLAN backplane reset */
3994	reg_val = brcmf_sdiod_regrb(bus->sdiodev,
3995				    SDIO_CCCR_BRCM_CARDCTRL, &err);
3996	if (err)
3997		goto fail;
3998
3999	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
4000
4001	brcmf_sdiod_regwb(bus->sdiodev,
4002			  SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
4003	if (err)
4004		goto fail;
4005
4006	/* set PMUControl so a backplane reset does PMU state reload */
4007	reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
4008			       pmucontrol);
4009	reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
4010	if (err)
4011		goto fail;
4012
4013	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
4014
4015	brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
4016	if (err)
4017		goto fail;
4018
4019	sdio_release_host(bus->sdiodev->func[1]);
4020
4021	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
4022
4023	/* allocate header buffer */
4024	bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
4025	if (!bus->hdrbuf)
4026		return false;
4027	/* Locate an appropriately-aligned portion of hdrbuf */
4028	bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
4029				    bus->head_align);
4030
4031	/* Set the poll and/or interrupt flags */
4032	bus->intr = true;
4033	bus->poll = false;
4034	if (bus->poll)
4035		bus->pollrate = 1;
4036
4037	return true;
4038
4039fail:
4040	sdio_release_host(bus->sdiodev->func[1]);
4041	return false;
4042}
4043
4044static int
4045brcmf_sdio_watchdog_thread(void *data)
4046{
4047	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
4048
4049	allow_signal(SIGTERM);
4050	/* Run until signal received */
4051	while (1) {
4052		if (kthread_should_stop())
4053			break;
4054		if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
4055			brcmf_sdio_bus_watchdog(bus);
4056			/* Count the tick for reference */
4057			bus->sdcnt.tickcnt++;
4058		} else
4059			break;
4060	}
4061	return 0;
4062}
4063
4064static void
4065brcmf_sdio_watchdog(unsigned long data)
4066{
4067	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
4068
4069	if (bus->watchdog_tsk) {
4070		complete(&bus->watchdog_wait);
4071		/* Reschedule the watchdog */
4072		if (bus->wd_timer_valid)
4073			mod_timer(&bus->timer,
4074				  jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
4075	}
4076}
4077
4078static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
4079	.stop = brcmf_sdio_bus_stop,
4080	.preinit = brcmf_sdio_bus_preinit,
4081	.init = brcmf_sdio_bus_init,
4082	.txdata = brcmf_sdio_bus_txdata,
4083	.txctl = brcmf_sdio_bus_txctl,
4084	.rxctl = brcmf_sdio_bus_rxctl,
4085	.gettxq = brcmf_sdio_bus_gettxq,
4086};
4087
4088struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4089{
4090	int ret;
4091	struct brcmf_sdio *bus;
4092
4093	brcmf_dbg(TRACE, "Enter\n");
4094
4095	/* Allocate private bus interface state */
4096	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
4097	if (!bus)
4098		goto fail;
4099
4100	bus->sdiodev = sdiodev;
4101	sdiodev->bus = bus;
4102	skb_queue_head_init(&bus->glom);
4103	bus->txbound = BRCMF_TXBOUND;
4104	bus->rxbound = BRCMF_RXBOUND;
4105	bus->txminmax = BRCMF_TXMINMAX;
4106	bus->tx_seq = SDPCM_SEQ_WRAP - 1;
4107
4108	/* platform-specific configuration:
4109	 *   alignments must be at least 4 bytes for ADMA
4110	 */
4111	bus->head_align = ALIGNMENT;
4112	bus->sgentry_align = ALIGNMENT;
4113	if (sdiodev->pdata) {
4114		if (sdiodev->pdata->sd_head_align > ALIGNMENT)
4115			bus->head_align = sdiodev->pdata->sd_head_align;
4116		if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
4117			bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
4118	}
4119
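	/* The DPC runs from datawork on a dedicated single-threaded
	 * workqueue.
	 */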
4120	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
4121	bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
4122	if (bus->brcmf_wq == NULL) {
4123		brcmf_err("insufficient memory to create workqueue\n");
4124		goto fail;
4125	}
4126
4127	/* attempt to attach to the dongle */
4128	if (!(brcmf_sdio_probe_attach(bus))) {
4129		brcmf_err("brcmf_sdio_probe_attach failed\n");
4130		goto fail;
4131	}
4132
4133	spin_lock_init(&bus->rxctl_lock);
4134	spin_lock_init(&bus->txqlock);
4135	init_waitqueue_head(&bus->ctrl_wait);
4136	init_waitqueue_head(&bus->dcmd_resp_wait);
4137
4138	/* Set up the watchdog timer */
4139	init_timer(&bus->timer);
4140	bus->timer.data = (unsigned long)bus;
4141	bus->timer.function = brcmf_sdio_watchdog;
4142
4143	/* Initialize watchdog thread */
4144	init_completion(&bus->watchdog_wait);
4145	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
4146					bus, "brcmf_watchdog");
4147	if (IS_ERR(bus->watchdog_tsk)) {
4148		pr_warn("brcmf_watchdog thread failed to start\n");
4149		bus->watchdog_tsk = NULL;
4150	}
4151	/* Initialize the DPC work counter */
4152	atomic_set(&bus->dpc_tskcnt, 0);
4153
4154	/* Assign bus interface call back */
4155	bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
4156	bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
4157	bus->sdiodev->bus_if->chip = bus->ci->chip;
4158	bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
4159
4160	/* default sdio bus header length for tx packet */
4161	/* default SDIO bus header length for tx packets */
4162
4163	/* Attach to the common layer, reserve hdr space */
4164	ret = brcmf_attach(bus->sdiodev->dev);
4165	if (ret != 0) {
4166		brcmf_err("brcmf_attach failed\n");
4167		goto fail;
4168	}
4169
4170	/* Allocate buffers */
4171	if (bus->sdiodev->bus_if->maxctl) {
4172		bus->rxblen =
4173		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
4174			    ALIGNMENT) + bus->head_align;
4175		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
4176		if (!(bus->rxbuf)) {
4177			brcmf_err("rxbuf allocation failed\n");
4178			goto fail;
4179		}
4180	}
4181
4182	sdio_claim_host(bus->sdiodev->func[1]);
4183
4184	/* Disable F2 to clear any intermediate frame state on the dongle */
4185	sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
4186
4187	bus->rxflow = false;
4188
4189	/* Done with backplane-dependent accesses, can drop clock... */
4190	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
4191
4192	sdio_release_host(bus->sdiodev->func[1]);
4193
4194	/* ...and initialize clock/power states */
4195	bus->clkstate = CLK_SDONLY;
4196	bus->idletime = BRCMF_IDLE_INTERVAL;
4197	bus->idleclock = BRCMF_IDLE_ACTIVE;
4198
4199	/* Query the F2 block size, set roundup accordingly */
4200	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
4201	bus->roundup = min(max_roundup, bus->blocksize);
4202
4203	/* SR state */
4204	bus->sleeping = false;
4205	bus->sr_enabled = false;
4206
4207	brcmf_sdio_debugfs_create(bus);
4208	brcmf_dbg(INFO, "completed!!\n");
4209
4210	/* if a firmware path is present, download it and bring up the bus */
4211	ret = brcmf_bus_start(bus->sdiodev->dev);
4212	if (ret != 0) {
4213		brcmf_err("dongle is not responding\n");
4214		goto fail;
4215	}
4216
4217	return bus;
4218
4219fail:
4220	brcmf_sdio_remove(bus);
4221	return NULL;
4222}
4223
4224/* Detach and free everything */
4225void brcmf_sdio_remove(struct brcmf_sdio *bus)
4226{
4227	brcmf_dbg(TRACE, "Enter\n");
4228
4229	if (bus) {
4230		/* De-register interrupt handler */
4231		brcmf_sdiod_intr_unregister(bus->sdiodev);
4232
4233		if (bus->sdiodev->bus_if->drvr) {
4234			brcmf_detach(bus->sdiodev->dev);
4235		}
4236
4237		cancel_work_sync(&bus->datawork);
4238		if (bus->brcmf_wq)
4239			destroy_workqueue(bus->brcmf_wq);
4240
4241		if (bus->ci) {
4242			if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
4243				sdio_claim_host(bus->sdiodev->func[1]);
4244				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
4245				/* Leave the device in a state where it is
4246				 * 'quiet'. This is done by putting it in
4247				 * download state, which essentially resets
4248				 * all necessary cores.
4249				 */
4250				msleep(20);
4251				brcmf_chip_enter_download(bus->ci);
4252				brcmf_sdio_clkctl(bus, CLK_NONE, false);
4253				sdio_release_host(bus->sdiodev->func[1]);
4254			}
4255			brcmf_chip_detach(bus->ci);
4256		}
4257
4258		brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
4259		kfree(bus->rxbuf);
4260		kfree(bus->hdrbuf);
4261		kfree(bus);
4262	}
4263
4264	brcmf_dbg(TRACE, "Disconnected\n");
4265}
4266
4267void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
4268{
4269	/* Totally stop the timer */
4270	if (!wdtick && bus->wd_timer_valid) {
4271		del_timer_sync(&bus->timer);
4272		bus->wd_timer_valid = false;
4273		bus->save_ms = wdtick;
4274		return;
4275	}
4276
4277	/* don't start the watchdog until the firmware is loaded */
4278	if (bus->sdiodev->bus_if->state != BRCMF_BUS_DATA)
4279		return;
4280
4281	if (wdtick) {
4282		if (bus->save_ms != BRCMF_WD_POLL_MS) {
4283			if (bus->wd_timer_valid)
4284				/* Stop timer and restart at new value */
4285				del_timer_sync(&bus->timer);
4286
4287			/* Create the timer again when the watchdog period
4288			 * changes dynamically or on first use.
4289			 */
4290			bus->timer.expires =
4291				jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
4292			add_timer(&bus->timer);
4293
4294		} else {
4295			/* Re-arm the timer at the last watchdog period */
4296			mod_timer(&bus->timer,
4297				jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
4298		}
4299
4300		bus->wd_timer_valid = true;
4301		bus->save_ms = wdtick;
4302	}
4303}
4304