/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2012, Broadcom Corporation
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: linux_osl.c 309193 2012-01-19 00:03:57Z $
 */

#define LINUX_PORT

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <pcicfg.h>

#ifdef BCMASSERT_LOG
#include <bcm_assert_log.h>
#endif


#include <linux/fs.h>

#define PCI_CFG_RETRY		10

#define OS_HANDLE_MAGIC		0x1234abcd
#define BCM_MEM_FILENAME_LEN	24

#ifdef CONFIG_DHD_USE_STATIC_BUF
#define STATIC_BUF_MAX_NUM	16
#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)

typedef struct bcm_static_buf {
	struct semaphore static_sem;
	unsigned char *buf_ptr;
	unsigned char buf_use[STATIC_BUF_MAX_NUM];
} bcm_static_buf_t;

static bcm_static_buf_t *bcm_static_buf = 0;

#define STATIC_PKT_MAX_NUM	8

typedef struct bcm_static_pkt {
	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
	struct semaphore osl_pkt_sem;
	unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2];
} bcm_static_pkt_t;

static bcm_static_pkt_t *bcm_static_skb = 0;
#endif

typedef struct bcm_mem_link {
	struct bcm_mem_link *prev;
	struct bcm_mem_link *next;
	uint	size;
	int	line;
	void	*osh;
	char	file[BCM_MEM_FILENAME_LEN];
} bcm_mem_link_t;

struct osl_info {
	osl_pubinfo_t pub;
#ifdef CTFPOOL
	ctfpool_t *ctfpool;
#endif
	uint magic;
	void *pdev;
	atomic_t malloced;
	uint failed;
	uint bustype;
	bcm_mem_link_t *dbgmem_list;
	spinlock_t dbgmem_lock;
	spinlock_t pktalloc_lock;
};




uint32 g_assert_type = FALSE;

static int16 linuxbcmerrormap[] =
{	0,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-E2BIG,
	-E2BIG,
	-EBUSY,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EFAULT,
	-ENOMEM,
	-EOPNOTSUPP,
	-EMSGSIZE,
	-EINVAL,
	-EPERM,
	-ENOMEM,
	-EINVAL,
	-ERANGE,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EINVAL,
	-EIO,
	-ENODEV,
	-EINVAL,
	-EIO,
	-EIO,
	-ENODEV,
	-EINVAL,
	-ENODATA,



#if BCME_LAST != -42
#error "You need to add an OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#endif
};

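/* Translate a bcmutils error code (BCME_*) into the corresponding Linux errno. */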
int
osl_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (bcmerror < BCME_LAST)
		bcmerror = BCME_ERROR;


	return linuxbcmerrormap[-bcmerror];
}

extern uint8* dhd_os_prealloc(void *osh, int section, int size);

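/*
 * Allocate and initialize an OS abstraction handle for the given device and
 * bus type.  Memory-mapped buses (PCI/SI/PCMCIA) set pub.mmbus; message-based
 * buses (SDIO/USB/...) clear it.
 */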
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	osl_t *osh;

	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
	ASSERT(osh);

	bzero(osh, sizeof(osl_t));


	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->magic = OS_HANDLE_MAGIC;
	atomic_set(&osh->malloced, 0);
	osh->failed = 0;
	osh->dbgmem_list = NULL;
	spin_lock_init(&(osh->dbgmem_lock));
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;

	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

#if defined(CONFIG_DHD_USE_STATIC_BUF)
	if (!bcm_static_buf) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE +
			STATIC_BUF_TOTAL_LEN))) {
			printk("can not alloc static buf!\n");
		} else {
			printk("alloc static buf at %p!\n", bcm_static_buf);

			/* Only set up the pool when the preallocation succeeded */
			sema_init(&bcm_static_buf->static_sem, 1);
			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
		}
	}

	/* The packet pool lives inside the static buf, so skip it if that allocation failed */
	if (bcm_static_buf && !bcm_static_skb) {
		int i;
		void *skb_buff_ptr = 0;
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * (STATIC_PKT_MAX_NUM * 2));
		for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++)
			bcm_static_skb->pkt_use[i] = 0;

		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
	}
#endif

	spin_lock_init(&(osh->pktalloc_lock));

	return osh;
}

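/* Tear down an OS handle created by osl_attach(). */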
void
osl_detach(osl_t *osh)
{
	if (osh == NULL)
		return;

#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	kfree(osh);
}

static struct sk_buff *osl_alloc_skb(unsigned int len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = GFP_ATOMIC;

	return __dev_alloc_skb(len, flags);
#else
	return dev_alloc_skb(len);
#endif
}

#ifdef CTFPOOL

#ifdef CTFPOOL_SPINLOCK
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
#else
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
#endif

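/*
 * Allocate one skb of the pool's object size and push it onto the CTF pool
 * free list.  Returns NULL if the pool is already full or the allocation fails.
 */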
void *
osl_ctfpool_add(osl_t *osh)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);


	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}


	skb = osl_alloc_skb(osh->ctfpool->obj_size);
	if (skb == NULL) {
		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
		       osh->ctfpool->obj_size);
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}


	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;


	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;


	PKTFAST(osh, skb) = FASTBUF;

	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	return skb;
}


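/* Refill the CTF pool with up to 'thresh' objects, bounded by the pending refill count. */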
void
osl_ctfpool_replenish(osl_t *osh, uint thresh)
{
	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;


	while ((osh->ctfpool->refills > 0) && (thresh--)) {
		osl_ctfpool_add(osh);
		osh->ctfpool->refills--;
	}
}


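/*
 * Create a CTF pool of 'numobj' preallocated skbs of 'size' bytes each;
 * returns -1 if any allocation fails.  fast_frees is decremented after each
 * add so the counter only reflects frees that happen after initialization.
 */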
int32
osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
{
	osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
	ASSERT(osh->ctfpool);
	bzero(osh->ctfpool, sizeof(ctfpool_t));

	osh->ctfpool->max_obj = numobj;
	osh->ctfpool->obj_size = size;

	spin_lock_init(&osh->ctfpool->lock);

	while (numobj--) {
		if (!osl_ctfpool_add(osh))
			return -1;
		osh->ctfpool->fast_frees--;
	}

	return 0;
}


void
osl_ctfpool_cleanup(osl_t *osh)
{
	struct sk_buff *skb, *nskb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

	CTFPOOL_LOCK(osh->ctfpool, flags);

	skb = osh->ctfpool->head;

	while (skb != NULL) {
		nskb = skb->next;
		dev_kfree_skb(skb);
		skb = nskb;
		osh->ctfpool->curr_obj--;
	}

	ASSERT(osh->ctfpool->curr_obj == 0);
	osh->ctfpool->head = NULL;
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	kfree(osh->ctfpool);
	osh->ctfpool = NULL;
}

void
osl_ctfpool_stats(osl_t *osh, void *b)
{
	struct bcmstrbuf *bb;

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif

	bb = b;

	ASSERT((osh != NULL) && (bb != NULL));

	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
	            osh->ctfpool->slow_allocs);
}

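/*
 * Fast-path packet allocation: pop an skb from the CTF pool free list and
 * reset its fields, avoiding a trip through the kernel allocator.
 */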
static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif


	if (osh->ctfpool == NULL)
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	if (osh->ctfpool->head == NULL) {
		ASSERT(osh->ctfpool->curr_obj == 0);
		osh->ctfpool->slow_allocs++;
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	ASSERT(len <= osh->ctfpool->obj_size);


	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	CTFPOOL_UNLOCK(osh->ctfpool, flags);


	skb->next = skb->prev = NULL;
	skb->data = skb->head + 16;
	skb->tail = skb->head + 16;

	skb->len = 0;
	skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	skb->list = NULL;
#endif
	atomic_set(&skb->users, 1);

	return skb;
}
#endif

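/*
 * Convert between native sk_buff chains and OSL packets.  Both directions
 * clear the packet tag (when enabled) and adjust the allocation count for
 * every buffer in the chain.
 */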
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
#ifndef WL_UMK
	struct sk_buff *nskb;
	unsigned long flags;
#endif

	if (osh->pub.pkttag)
		bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);

#ifndef WL_UMK

	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced--;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
	}
#endif
	return (struct sk_buff *)pkt;
}


void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
{
#ifndef WL_UMK
	struct sk_buff *nskb;
	unsigned long flags;
#endif

	if (osh->pub.pkttag)
		bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);

#ifndef WL_UMK

	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced++;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
	}
#endif
	return (void *)pkt;
}


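/*
 * Allocate a packet of 'len' bytes.  With CTFPOOL the pool is tried first and
 * osl_alloc_skb() is the fallback; the returned skb is pre-put to 'len'.
 */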
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
{
	struct sk_buff *skb;
	unsigned long flags;

#ifdef CTFPOOL

	skb = osl_pktfastget(osh, len);
	if ((skb != NULL) || ((skb = osl_alloc_skb(len)) != NULL)) {
#else
	if ((skb = osl_alloc_skb(len))) {
#endif
		skb_put(skb, len);
		skb->priority = 0;


		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced++;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
	}

	return ((void*) skb);
}

#ifdef CTFPOOL
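/*
 * Fast-path packet free: scrub the skb's per-packet state and return it to
 * its originating CTF pool instead of freeing it to the kernel.
 */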
static inline void
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
{
	ctfpool_t *ctfpool;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv.sec = 0;
#else
	skb->stamp.tv_sec = 0;
#endif


	skb->dev = NULL;
	skb->dst = NULL;
	memset(skb->cb, 0, sizeof(skb->cb));
	skb->ip_summed = 0;
	skb->destructor = NULL;

	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	ASSERT(ctfpool != NULL);


	CTFPOOL_LOCK(ctfpool, flags);
	skb->next = (struct sk_buff *)ctfpool->head;
	ctfpool->head = (void *)skb;

	ctfpool->fast_frees++;
	ctfpool->curr_obj++;

	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
	CTFPOOL_UNLOCK(ctfpool, flags);
}
#endif


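/*
 * Free a packet chain.  CTF pool buffers with a single reference go back to
 * their pool; everything else is released through the kernel skb free
 * routines.  The allocation count is decremented once per buffer.
 */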
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;
	unsigned long flags;

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);


	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
			osl_pktfastfree(osh, skb);
		else {
#else
		{
#endif
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}
		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced--;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
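/*
 * Allocate a packet from the preallocated static skb pool: requests up to
 * PAGE_SIZE use the 4K slots, larger ones (up to two pages) the 8K slots.
 * Falls back to osl_pktget() when the request is too big or all slots are
 * in use.
 */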
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	if (len > (PAGE_SIZE*2)) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= PAGE_SIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);
			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			return skb;
		}
	}


	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
			break;
	}

	if (i != STATIC_PKT_MAX_NUM) {
		bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
		up(&bcm_static_skb->osl_pkt_sem);
		skb = bcm_static_skb->skb_8k[i];
		skb->tail = skb->data + len;
		skb->len = len;
		return skb;
	}

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
}

void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;

	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			down(&bcm_static_skb->osl_pkt_sem);
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			down(&bcm_static_skb->osl_pkt_sem);
			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	/* Not one of the static skbs; free it as a normal packet */
	osl_pktfree(osh, p, send);
}
#endif

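/*
 * PCI configuration space accessors.  Reads are retried while the device
 * returns all-ones; writes to the BAR0 window register are read back and
 * retried until they stick.
 */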
uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
	uint val = 0;
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));


	ASSERT(size == 4);

	do {
		pci_read_config_dword(osh->pdev, offset, &val);
		if (val != 0xffffffff)
			break;
	} while (retry--);


	return (val);
}

void
osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));


	ASSERT(size == 4);

	do {
		pci_write_config_dword(osh->pdev, offset, val);
		if (offset != PCI_BAR0_WIN)
			break;
		if (osl_pci_read_config(osh, offset, size) == val)
			break;
	} while (retry--);

}


uint
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}


uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
}


struct pci_dev *
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return osh->pdev;
}

static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}

void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}

void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}

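/*
 * Allocate 'size' bytes.  With CONFIG_DHD_USE_STATIC_BUF, requests between
 * PAGE_SIZE and STATIC_BUF_SIZE are served from the preallocated static
 * buffer pool; everything else goes to kmalloc(GFP_ATOMIC).
 */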
void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;


	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		int i = 0;
		if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
		{
			down(&bcm_static_buf->static_sem);

			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				up(&bcm_static_buf->static_sem);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			up(&bcm_static_buf->static_sem);

			bzero(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE*i, size);
			if (osh)
				atomic_add(size, &osh->malloced);

			return ((void *)(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE*i));
		}
	}
original:
#endif

	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh)
		atomic_add(size, &osh->malloced);

	return (addr);
}

void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			down(&bcm_static_buf->static_sem);
			bcm_static_buf->buf_use[buf_idx] = 0;
			up(&bcm_static_buf->static_sem);

			if (osh) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->malloced);
			}
			return;
		}
	}
#endif
	if (osh) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
		atomic_sub(size, &osh->malloced);
	}
	kfree(addr);
}

uint
osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->malloced));
}

uint
osl_malloc_failed(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);
}


uint
osl_dma_consistent_align(void)
{
	return (PAGE_SIZE);
}

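/*
 * DMA-coherent allocation via the PCI consistent API.  Extra bytes are
 * requested when the platform's consistent alignment does not already
 * satisfy the caller's alignment requirement.
 */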
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
{
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

	return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
}

void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
}

uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
	return (pci_map_single(osh->pdev, va, size, dir));
}

void BCMFASTPATH
osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
}

#if defined(BCMASSERT_LOG)
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	basename = strrchr(file, '/');

	if (basename)
		basename++;

	if (!basename)
		basename = file;

#ifdef BCMASSERT_LOG
	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);

	bcm_assert_log(tempbuf);
#endif


}
#endif

void
osl_delay(uint usec)
{
	uint d;

	while (usec > 0) {
		d = MIN(usec, 1000);
		udelay(d);
		usec -= d;
	}
}


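/*
 * Clone a packet.  Clones of CTF pool buffers are demoted to regular skbs
 * (both the clone and the original lose the FAST flag) and a pool refill is
 * requested.
 */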
void *
osl_pktdup(osl_t *osh, void *skb)
{
	void *p;
	unsigned long irqflags;


	PKTCTFMAP(osh, skb);

	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
		return NULL;

#ifdef CTFPOOL
	if (PKTISFAST(osh, skb)) {
		ctfpool_t *ctfpool;


		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		PKTCLRFAST(osh, p);
		PKTCLRFAST(osh, skb);
		ctfpool->refills++;
	}
#endif


	if (osh->pub.pkttag)
		bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);


	spin_lock_irqsave(&osh->pktalloc_lock, irqflags);
	osh->pub.pktalloced++;
	spin_unlock_irqrestore(&osh->pktalloc_lock, irqflags);
	return (p);
}


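/*
 * Firmware image helpers: open a file read-only, read it in sequential
 * blocks, and close it again.
 */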
void *
osl_os_open_image(char *filename)
{
	struct file *fp;

	fp = filp_open(filename, O_RDONLY, 0);

	if (IS_ERR(fp))
		fp = NULL;

	return fp;
}

int
osl_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;

	if (!image)
		return 0;

	rdlen = kernel_read(fp, fp->f_pos, buf, len);
	if (rdlen > 0)
		fp->f_pos += rdlen;

	return rdlen;
}

void
osl_os_close_image(void *image)
{
	if (image)
		filp_close((struct file *)image, NULL);
}