/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef	_BRCM_DMA_H_
#define	_BRCM_DMA_H_

#include <linux/delay.h>
#include <linux/skbuff.h>
#include "types.h"		/* forward structure declarations */

/* map/unmap direction */
#define	DMA_TX	1		/* TX direction for DMA */
#define	DMA_RX	2		/* RX direction for DMA */

/* DMA structure:
 *  supports two DMA engines: 32-bit or 64-bit addressing
 *  the basic DMA register set is per channel (transmit or receive)
 *  a pair of channels is defined for convenience
 */

/* 32-bit addressing */

struct dma32diag {	/* diag access */
	u32 fifoaddr;	/* diag address */
	u32 fifodatalow;	/* low 32 bits of data */
	u32 fifodatahigh;	/* high 32 bits of data */
	u32 pad;		/* reserved */
};

/* 64-bit addressing */

/* dma registers per channel (xmt or rcv) */
struct dma64regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* desc ring base address low 32-bits (8K aligned) */
	u32 addrhigh;	/* desc ring base address bits 63:32 (8K aligned) */
	u32 status0;	/* current descriptor, xmt state */
	u32 status1;	/* active descriptor, xmt error */
};

/* range param for dma_getnexttxp() and dma_txreclaim() */
enum txd_range {
	DMA_RANGE_ALL = 1,
	DMA_RANGE_TRANSMITTED,
	DMA_RANGE_TRANSFERED
};
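
/*
 * Example (hypothetical caller, not part of this interface): reclaim
 * every packet the hardware has fully transmitted and free the
 * underlying buffers.  dma_getnexttxp() (declared below) returns NULL
 * once no more descriptors fall in the requested range; "dma" is a
 * placeholder for an attached struct dma_pub.
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = dma_getnexttxp(dma, DMA_RANGE_TRANSMITTED)) != NULL)
 *		dev_kfree_skb(skb);
 */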

/*
 * Exported data structure (read-only)
 */
struct dma_pub {
	uint txavail;		/* # free tx descriptors */
	uint dmactrlflags;	/* dma control flags */

	/* rx error counters */
	uint rxgiants;		/* rx giant frames */
	uint rxnobuf;		/* rx out of dma descriptors */
	/* tx error counters */
	uint txnobuf;		/* tx out of dma descriptors */
};
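
/*
 * Example (hypothetical transmit path): callers treat this structure as
 * read-only, e.g. checking the free-descriptor count before posting a
 * frame with dma_txfast() (declared below).  "wlc", "dma" and "skb" are
 * placeholder variables.
 *
 *	if (dma->txavail > 0)
 *		err = dma_txfast(wlc, dma, skb);
 *	else
 *		err = -ENOSPC;
 */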

struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
			   uint txregbase, uint rxregbase,
			   uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset);
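
/*
 * Example (hypothetical setup path): attach a tx/rx channel pair.  The
 * name, register offsets, ring sizes and rx tuning values below are
 * placeholders, not values mandated by this interface.
 *
 *	struct dma_pub *dma;
 *
 *	dma = dma_attach("wl0_fifo0", wlc, txregbase, rxregbase,
 *			 ntxd, nrxd, rxbufsize, 0, nrxpost, rxoffset);
 *	if (dma == NULL)
 *		return -ENOMEM;
 */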

void dma_rxinit(struct dma_pub *pub);
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
bool dma_rxfill(struct dma_pub *pub);
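
/*
 * Example (hypothetical receive path): drain completed frames from the
 * rx ring into a local list, then post fresh buffers back to the ring.
 * "dma" is a placeholder for an attached struct dma_pub.
 *
 *	struct sk_buff_head rx_frames;
 *
 *	skb_queue_head_init(&rx_frames);
 *	dma_rx(dma, &rx_frames);
 *	dma_rxfill(dma);
 */
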
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
void dma_txinit(struct dma_pub *pub);
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p0);
void dma_txflush(struct dma_pub *pub);
int dma_txpending(struct dma_pub *pub);
void dma_kick_tx(struct dma_pub *pub);
void dma_txsuspend(struct dma_pub *pub);
bool dma_txsuspended(struct dma_pub *pub);
void dma_txresume(struct dma_pub *pub);
void dma_txreclaim(struct dma_pub *pub, enum txd_range range);
void dma_rxreclaim(struct dma_pub *pub);
void dma_detach(struct dma_pub *pub);
unsigned long dma_getvar(struct dma_pub *pub, const char *name);
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
void dma_counterreset(struct dma_pub *pub);

void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a);
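
/*
 * Example (hypothetical use): apply a per-packet fixup to every packet
 * still queued on the DMA rings.  The callback, its argument and "prio"
 * are placeholders.
 *
 *	static void set_prio(void *pkt, void *arg_a)
 *	{
 *		struct sk_buff *skb = pkt;
 *
 *		skb->priority = (unsigned long)arg_a;
 *	}
 *
 *	dma_walk_packets(dma, set_prio, (void *)(unsigned long)prio);
 */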

/*
 * On bcm47xx chips a DMA bug can signal that a packet is ready before
 * the DMA engine has written the packet length into the buffer.  The
 * workaround is to spin until the length becomes non-zero, staying off
 * the bus so the DMA engine can complete the update.
 */
static inline void dma_spin_for_len(uint len, struct sk_buff *head)
{
#if defined(CONFIG_BCM47XX)
	if (!len) {
		/* read through KSEG1 (uncached) so the CPU sees the
		 * length as soon as the DMA engine writes it
		 */
		while (!(len = *(u16 *) KSEG1ADDR(head->data)))
			udelay(1);

		/* store the length back through the normal mapping */
		*(u16 *) (head->data) = cpu_to_le16((u16) len);
	}
#endif				/* defined(CONFIG_BCM47XX) */
}
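
/*
 * Example (hypothetical rx path): the length prepended to the frame by
 * the hardware may still read as zero when the descriptor completes, so
 * a caller might read it and then spin until it is valid.  "skb" is a
 * placeholder for the received buffer.
 *
 *	len = le16_to_cpu(*(__le16 *)(skb->data));
 *	dma_spin_for_len(len, skb);
 */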

#endif				/* _BRCM_DMA_H_ */