/*
* Linux OS Independent Layer
*
* Copyright (C) 1999-2016, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
*
* <<Broadcom-WL-IPTag/Open:>>
*
* $Id: linux_osl.c 589291 2015-09-29 07:09:00Z $
*/
#define LINUX_PORT
#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>
#ifdef mips
#include <asm/paccess.h>
#include <asm/cache.h>
#include <asm/r4kcache.h>
#undef ABS
#endif /* mips */
#if !defined(STBLINUX)
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#endif /* STBLINUX */
#include <linux/random.h>
#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <pcicfg.h>
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
#include <asm-generic/pci-dma-compat.h>
#endif
#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <stbutils.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#if defined(__ARM_ARCH_7A__)
#include <arch/arm/include/asm/tlbflush.h>
#endif
#endif /* BCM_SECURE_DMA */
#include <linux/fs.h>
#if defined(STB)
#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;
#endif
#define PCI_CFG_RETRY 10
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
#define DUMPBUFSZ 1024
/* dependency check */
#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF suppored PCIE target only"
#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
#ifdef CONFIG_DHD_USE_STATIC_BUF
#ifdef DHD_USE_STATIC_CTRLBUF
#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
#define PREALLOC_FREE_MAGIC 0xFEDC
#define PREALLOC_USED_MAGIC 0xFCDE
#else
#define DHD_SKB_HDRSIZE 336
#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
#endif /* DHD_USE_STATIC_CTRLBUF */
#define STATIC_BUF_MAX_NUM 16
#define STATIC_BUF_SIZE (PAGE_SIZE*2)
#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
typedef struct bcm_static_buf {
spinlock_t static_lock;
unsigned char *buf_ptr;
unsigned char buf_use[STATIC_BUF_MAX_NUM];
} bcm_static_buf_t;
static bcm_static_buf_t *bcm_static_buf = 0;
#ifdef DHD_USE_STATIC_CTRLBUF
#define STATIC_PKT_4PAGE_NUM 0
#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
#elif defined(ENHANCED_STATIC_BUF)
#define STATIC_PKT_4PAGE_NUM 1
#define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE
#else
#define STATIC_PKT_4PAGE_NUM 0
#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
#endif /* DHD_USE_STATIC_CTRLBUF */
#ifdef DHD_USE_STATIC_CTRLBUF
#define STATIC_PKT_1PAGE_NUM 0
#define STATIC_PKT_2PAGE_NUM 64
#else
#define STATIC_PKT_1PAGE_NUM 8
#define STATIC_PKT_2PAGE_NUM 8
#endif /* DHD_USE_STATIC_CTRLBUF */
#define STATIC_PKT_1_2PAGE_NUM \
((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
#define STATIC_PKT_MAX_NUM \
((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
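/*
 * Pool sizing example (illustrative, assuming 4 KB pages): with
 * DHD_USE_STATIC_CTRLBUF the pool is 64 two-page (8 KB) skbs; otherwise it
 * is 8 one-page plus 8 two-page skbs, where each buffer loses
 * DHD_SKB_HDRSIZE (336) bytes to the skb header, e.g.
 * DHD_SKB_2PAGE_BUFSIZE = 2*4096 - 336 = 7856 bytes of payload.
 * ENHANCED_STATIC_BUF adds one four-page skb on top.
 */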
typedef struct bcm_static_pkt {
#ifdef DHD_USE_STATIC_CTRLBUF
struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
spinlock_t osl_pkt_lock;
uint32 last_allocated_index;
#else
struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
#ifdef ENHANCED_STATIC_BUF
struct sk_buff *skb_16k;
#endif /* ENHANCED_STATIC_BUF */
struct semaphore osl_pkt_sem;
#endif /* DHD_USE_STATIC_CTRLBUF */
unsigned char pkt_use[STATIC_PKT_MAX_NUM];
} bcm_static_pkt_t;
static bcm_static_pkt_t *bcm_static_skb = 0;
void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */
typedef struct bcm_mem_link {
struct bcm_mem_link *prev;
struct bcm_mem_link *next;
uint size;
int line;
void *osh;
char file[BCM_MEM_FILENAME_LEN];
} bcm_mem_link_t;
struct osl_cmn_info {
atomic_t malloced;
atomic_t pktalloced; /* Number of allocated packet buffers */
spinlock_t dbgmem_lock;
bcm_mem_link_t *dbgmem_list;
spinlock_t pktalloc_lock;
atomic_t refcount; /* Number of references to this shared structure. */
};
typedef struct osl_cmn_info osl_cmn_t;
struct osl_info {
osl_pubinfo_t pub;
uint32 flags; /* Flags for cases that need special handling in the OSL */
uint magic;
void *pdev;
uint failed;
uint bustype;
osl_cmn_t *cmn; /* Common OSL data shared between two OSHs */
void *bus_handle;
#ifdef BCM_SECURE_DMA
struct sec_mem_elem *sec_list_4096;
struct sec_mem_elem *sec_list_base_4096;
phys_addr_t contig_base;
void *contig_base_va;
phys_addr_t contig_base_alloc;
void *contig_base_alloc_va;
phys_addr_t contig_base_alloc_coherent;
void *contig_base_alloc_coherent_va;
void *contig_base_coherent_va;
phys_addr_t contig_delta_va_pa;
struct {
phys_addr_t pa;
void *va;
bool avail;
} sec_cma_coherent[SEC_CMA_COHERENT_MAX];
int stb_ext_params;
#endif /* BCM_SECURE_DMA */
};
#ifdef BCM_SECURE_DMA
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
void *sec_list_base);
static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
int direction, struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */
#define OSL_PKTTAG_CLEAR(p) \
do { \
struct sk_buff *s = (struct sk_buff *)(p); \
ASSERT(OSL_PKTTAG_SZ == 32); \
*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
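/*
 * OSL_PKTTAG_CLEAR zeroes the 32-byte skb->cb[] control block, which the
 * OSL uses as the packet tag, with eight 32-bit stores. Typical use, as in
 * osl_pkt_tonative()/osl_pkt_frmnative() below:
 *
 *	if (osh->pub.pkttag)
 *		OSL_PKTTAG_CLEAR(pkt);
 */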
/* PCMCIA attribute space access macros */
/* Global ASSERT type flag */
uint32 g_assert_type = 0;
module_param(g_assert_type, int, 0);
#ifdef BCM_SECURE_DMA
#define SECDMA_MODULE_PARAMS 0
#define SECDMA_EXT_FILE 1
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */
static int16 linuxbcmerrormap[] =
{ 0, /* 0 */
-EINVAL, /* BCME_ERROR */
-EINVAL, /* BCME_BADARG */
-EINVAL, /* BCME_BADOPTION */
-EINVAL, /* BCME_NOTUP */
-EINVAL, /* BCME_NOTDOWN */
-EINVAL, /* BCME_NOTAP */
-EINVAL, /* BCME_NOTSTA */
-EINVAL, /* BCME_BADKEYIDX */
-EINVAL, /* BCME_RADIOOFF */
-EINVAL, /* BCME_NOTBANDLOCKED */
-EINVAL, /* BCME_NOCLK */
-EINVAL, /* BCME_BADRATESET */
-EINVAL, /* BCME_BADBAND */
-E2BIG, /* BCME_BUFTOOSHORT */
-E2BIG, /* BCME_BUFTOOLONG */
-EBUSY, /* BCME_BUSY */
-EINVAL, /* BCME_NOTASSOCIATED */
-EINVAL, /* BCME_BADSSIDLEN */
-EINVAL, /* BCME_OUTOFRANGECHAN */
-EINVAL, /* BCME_BADCHAN */
-EFAULT, /* BCME_BADADDR */
-ENOMEM, /* BCME_NORESOURCE */
-EOPNOTSUPP, /* BCME_UNSUPPORTED */
-EMSGSIZE, /* BCME_BADLENGTH */
-EINVAL, /* BCME_NOTREADY */
-EPERM, /* BCME_EPERM */
-ENOMEM, /* BCME_NOMEM */
-EINVAL, /* BCME_ASSOCIATED */
-ERANGE, /* BCME_RANGE */
-EINVAL, /* BCME_NOTFOUND */
-EINVAL, /* BCME_WME_NOT_ENABLED */
-EINVAL, /* BCME_TSPEC_NOTFOUND */
-EINVAL, /* BCME_ACM_NOTSUPPORTED */
-EINVAL, /* BCME_NOT_WME_ASSOCIATION */
-EIO, /* BCME_SDIO_ERROR */
-ENODEV, /* BCME_DONGLE_DOWN */
-EINVAL, /* BCME_VERSION */
-EIO, /* BCME_TXFAIL */
-EIO, /* BCME_RXFAIL */
-ENODEV, /* BCME_NODEVICE */
-EINVAL, /* BCME_NMODE_DISABLED */
-ENODATA, /* BCME_NONRESIDENT */
-EINVAL, /* BCME_SCANREJECT */
-EINVAL, /* BCME_USAGE_ERROR */
-EIO, /* BCME_IOCTL_ERROR */
-EIO, /* BCME_SERIAL_PORT_ERR */
-EOPNOTSUPP, /* BCME_DISABLED, BCME_NOTENABLED */
-EIO, /* BCME_DECERR */
-EIO, /* BCME_ENCERR */
-EIO, /* BCME_MICERR */
-ERANGE, /* BCME_REPLAY */
-EINVAL, /* BCME_IE_NOTFOUND */
-EINVAL, /* BCME_DATA_NOTFOUND */
/* When a new error code is added to bcmutils.h, add the OS-specific
 * error translation here as well
 */
/* check if BCME_LAST changed since the last time this table was updated */
#if BCME_LAST != -53
#error "You need to add a OS error translation in the linuxbcmerrormap \
for new error code defined in bcmutils.h"
#endif
};
uint lmtest = FALSE;
/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
{
if (bcmerror > 0)
bcmerror = 0;
else if (bcmerror < BCME_LAST)
bcmerror = BCME_ERROR;
/* Array bounds covered by ASSERT in osl_attach */
return linuxbcmerrormap[-bcmerror];
}
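/*
 * Illustrative translations (following the table above):
 *	osl_error(BCME_NOMEM)    returns -ENOMEM
 *	osl_error(BCME_OK)       returns 0 (all non-negative codes map to 0)
 *	osl_error(BCME_LAST - 1) returns -EINVAL (out of range clamps to BCME_ERROR)
 */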
osl_t *
#ifdef SHARED_OSL_CMN
osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
#else
osl_attach(void *pdev, uint bustype, bool pkttag)
#endif /* SHARED_OSL_CMN */
{
#ifndef SHARED_OSL_CMN
void **osl_cmn = NULL;
#endif /* SHARED_OSL_CMN */
osl_t *osh;
gfp_t flags;
#ifdef BCM_SECURE_DMA
u32 secdma_memsize;
#endif
flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
if (!(osh = kmalloc(sizeof(osl_t), flags)))
return osh;
ASSERT(osh);
bzero(osh, sizeof(osl_t));
if (osl_cmn == NULL || *osl_cmn == NULL) {
if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
kfree(osh);
return NULL;
}
bzero(osh->cmn, sizeof(osl_cmn_t));
if (osl_cmn)
*osl_cmn = osh->cmn;
atomic_set(&osh->cmn->malloced, 0);
osh->cmn->dbgmem_list = NULL;
spin_lock_init(&(osh->cmn->dbgmem_lock));
spin_lock_init(&(osh->cmn->pktalloc_lock));
} else {
osh->cmn = *osl_cmn;
}
atomic_add(1, &osh->cmn->refcount);
bcm_object_trace_init();
/* Check that error map has the right number of entries in it */
ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
osh->failed = 0;
osh->pdev = pdev;
osh->pub.pkttag = pkttag;
osh->bustype = bustype;
osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA
if ((secdma_addr != 0) && (secdma_size != 0)) {
printk("linux_osl.c: Buffer info passed via module params, using it.\n");
if (secdma_found == 0) {
osh->contig_base_alloc = (phys_addr_t)secdma_addr;
secdma_memsize = secdma_size;
} else if (secdma_found == 1) {
osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
secdma_memsize = secdma_size2;
} else {
printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
kfree(osh);
return NULL;
}
osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
(unsigned int)osh->contig_base_alloc);
osh->stb_ext_params = SECDMA_MODULE_PARAMS;
}
else if (stbpriv_init(osh) == 0) {
printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
if (secdma_found == 0) {
osh->contig_base_alloc =
(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
} else if (secdma_found == 1) {
osh->contig_base_alloc =
(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
} else {
printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
kfree(osh);
return NULL;
}
osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
(unsigned int)osh->contig_base_alloc);
osh->stb_ext_params = SECDMA_EXT_FILE;
}
else {
printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
kfree(osh);
return NULL;
}
secdma_found++;
osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
phys_to_page((u32)osh->contig_base_alloc),
CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);
if (osh->contig_base_alloc_coherent_va == NULL) {
if (osh->cmn)
kfree(osh->cmn);
kfree(osh);
return NULL;
}
osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
osh->contig_base_alloc_coherent = osh->contig_base_alloc;
osl_sec_dma_init_consistent(osh);
osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;
osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
if (osh->contig_base_alloc_va == NULL) {
osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
if (osh->cmn)
kfree(osh->cmn);
kfree(osh);
return NULL;
}
osh->contig_base_va = osh->contig_base_alloc_va;
if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
if (osh->cmn)
kfree(osh->cmn);
kfree(osh);
return NULL;
}
osh->sec_list_base_4096 = osh->sec_list_4096;
#endif /* BCM_SECURE_DMA */
switch (bustype) {
case PCI_BUS:
case SI_BUS:
case PCMCIA_BUS:
osh->pub.mmbus = TRUE;
break;
case JTAG_BUS:
case SDIO_BUS:
case USB_BUS:
case SPI_BUS:
case RPC_BUS:
osh->pub.mmbus = FALSE;
break;
default:
ASSERT(FALSE);
break;
}
return osh;
}
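/*
 * Illustrative attach/detach sequence from a bus probe path, assuming
 * SHARED_OSL_CMN is not defined (the surrounding variables are
 * hypothetical):
 *
 *	osl_t *osh = osl_attach(pdev, PCI_BUS, TRUE);
 *	if (osh == NULL)
 *		return -ENOMEM;
 *	osl_set_bus_handle(osh, bus);
 *	...
 *	osl_detach(osh);
 */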
int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
if (!bcm_static_buf && adapter) {
if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
printk("can not alloc static buf!\n");
bcm_static_skb = NULL;
ASSERT(osh->magic == OS_HANDLE_MAGIC);
return -ENOMEM;
} else {
printk("alloc static buf at %p!\n", bcm_static_buf);
}
spin_lock_init(&bcm_static_buf->static_lock);
bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
}
#if defined(DHD_USE_STATIC_CTRLBUF)
if (!bcm_static_skb && adapter) {
int i;
void *skb_buff_ptr = 0;
bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
if (!skb_buff_ptr) {
printk("cannot alloc static buf!\n");
bcm_static_buf = NULL;
bcm_static_skb = NULL;
ASSERT(osh->magic == OS_HANDLE_MAGIC);
return -ENOMEM;
}
bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
(STATIC_PKT_MAX_NUM));
for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
bcm_static_skb->pkt_use[i] = 0;
}
#ifdef DHD_USE_STATIC_CTRLBUF
spin_lock_init(&bcm_static_skb->osl_pkt_lock);
bcm_static_skb->last_allocated_index = 0;
#else
sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
#endif
#endif /* CONFIG_DHD_USE_STATIC_BUF */
return 0;
}
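/*
 * Layout note: the first STATIC_BUF_SIZE bytes of the region returned by
 * wifi_platform_prealloc(adapter, 3, ...) hold the bcm_static_buf_t header
 * (and, at offset 2048, the bcm_static_pkt_t bookkeeping); the following
 * STATIC_BUF_TOTAL_LEN bytes form the buffer pool itself. The meaning of
 * the section indices (3 and 4 above) is defined by the platform glue.
 */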
void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
osh->bus_handle = bus_handle;
}
void* osl_get_bus_handle(osl_t *osh)
{
return osh->bus_handle;
}
void
osl_detach(osl_t *osh)
{
if (osh == NULL)
return;
#ifdef BCM_SECURE_DMA
if (osh->stb_ext_params == SECDMA_EXT_FILE)
stbpriv_exit(osh);
osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
secdma_found--;
#endif /* BCM_SECURE_DMA */
bcm_object_trace_deinit();
ASSERT(osh->magic == OS_HANDLE_MAGIC);
atomic_sub(1, &osh->cmn->refcount);
if (atomic_read(&osh->cmn->refcount) == 0) {
kfree(osh->cmn);
}
kfree(osh);
}
int osl_static_mem_deinit(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
if (bcm_static_buf) {
bcm_static_buf = 0;
}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
return 0;
}
static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
{
struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
flags |= GFP_ATOMIC;
#endif
skb = __dev_alloc_skb(len, flags);
#else
skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
return skb;
}
/* Convert a driver packet to a native (OS) packet.
 * In the process, the pkttag is zeroed out before sending up:
 * the IP code depends on skb->cb being set up correctly with various
 * options, which in our case means it should be 0.
 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
struct sk_buff *nskb;
if (osh->pub.pkttag)
OSL_PKTTAG_CLEAR(pkt);
/* Decrement the packet counter */
for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
}
return (struct sk_buff *)pkt;
}
/* Convert a native (OS) packet to a driver packet.
 * In the process the native packet is consumed (there is no copying),
 * and the pkttag is zeroed out.
 */
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
{
struct sk_buff *nskb;
if (osh->pub.pkttag)
OSL_PKTTAG_CLEAR(pkt);
/* Increment the packet counter */
for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
}
return (void *)pkt;
}
/* Return a new packet. Zero out the pkttag. */
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
{
struct sk_buff *skb;
uchar num = 0;
if (lmtest != FALSE) {
get_random_bytes(&num, sizeof(uchar));
if ((num + 1) <= (256 * lmtest / 100))
return NULL;
}
if ((skb = osl_alloc_skb(osh, len))) {
skb->tail += len;
skb->len += len;
skb->priority = 0;
atomic_inc(&osh->cmn->pktalloced);
}
return ((void*) skb);
}
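/*
 * lmtest simulates low-memory conditions: when nonzero it acts as a failure
 * percentage. A random byte num is drawn and the allocation fails when
 * (num + 1) <= 256 * lmtest / 100; e.g. lmtest = 10 fails for num <= 24,
 * i.e. roughly 10% (25/256) of allocations.
 */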
/* Free the driver packet. Free the tag if present */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
struct sk_buff *skb, *nskb;
if (osh == NULL)
return;
skb = (struct sk_buff*) p;
if (send && osh->pub.tx_fn)
osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
printk("%s: pkt %p is from static pool\n",
__FUNCTION__, p);
dump_stack();
return;
}
if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
printk("%s: pkt %p is from static pool and not in used\n",
__FUNCTION__, p);
dump_stack();
return;
}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
/* perversion: we use skb->next to chain multi-skb packets */
while (skb) {
nskb = skb->next;
skb->next = NULL;
{
dev_kfree_skb_any(skb);
}
atomic_dec(&osh->cmn->pktalloced);
skb = nskb;
}
}
#ifdef CONFIG_DHD_USE_STATIC_BUF
void*
osl_pktget_static(osl_t *osh, uint len)
{
int i = 0;
struct sk_buff *skb;
#ifdef DHD_USE_STATIC_CTRLBUF
unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */
if (!bcm_static_skb)
return osl_pktget(osh, len);
if (len > DHD_SKB_MAX_BUFSIZE) {
printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
return osl_pktget(osh, len);
}
#ifdef DHD_USE_STATIC_CTRLBUF
spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
if (len <= DHD_SKB_2PAGE_BUFSIZE) {
uint32 index;
for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
bcm_static_skb->last_allocated_index++;
if (bcm_static_skb->skb_8k[index] &&
bcm_static_skb->pkt_use[index] == 0) {
break;
}
}
if ((i != STATIC_PKT_2PAGE_NUM) &&
(index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) {
bcm_static_skb->pkt_use[index] = 1;
skb = bcm_static_skb->skb_8k[index];
skb->data = skb->head;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb_set_tail_pointer(skb, NET_SKB_PAD);
#else
skb->tail = skb->data + NET_SKB_PAD;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
skb->data += NET_SKB_PAD;
skb->cloned = 0;
skb->priority = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb_set_tail_pointer(skb, len);
#else
skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
skb->len = len;
skb->mac_len = PREALLOC_USED_MAGIC;
spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
return skb;
}
}
spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
printk("%s: all static pkt in use!\n", __FUNCTION__);
return NULL;
#else
down(&bcm_static_skb->osl_pkt_sem);
if (len <= DHD_SKB_1PAGE_BUFSIZE) {
for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
if (bcm_static_skb->skb_4k[i] &&
bcm_static_skb->pkt_use[i] == 0) {
break;
}
}
if (i != STATIC_PKT_MAX_NUM) {
bcm_static_skb->pkt_use[i] = 1;
skb = bcm_static_skb->skb_4k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb_set_tail_pointer(skb, len);
#else
skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
skb->len = len;
up(&bcm_static_skb->osl_pkt_sem);
return skb;
}
}
if (len <= DHD_SKB_2PAGE_BUFSIZE) {
for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
bcm_static_skb->pkt_use[i] == 0) {
break;
}
}
if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
bcm_static_skb->pkt_use[i] = 1;
skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb_set_tail_pointer(skb, len);
#else
skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
skb->len = len;
up(&bcm_static_skb->osl_pkt_sem);
return skb;
}
}
#if defined(ENHANCED_STATIC_BUF)
if (bcm_static_skb->skb_16k &&
bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb_set_tail_pointer(skb, len);
#else
skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
skb->len = len;
up(&bcm_static_skb->osl_pkt_sem);
return skb;
}
#endif /* ENHANCED_STATIC_BUF */
up(&bcm_static_skb->osl_pkt_sem);
printk("%s: all static pkt in use!\n", __FUNCTION__);
return osl_pktget(osh, len);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
int i;
#ifdef DHD_USE_STATIC_CTRLBUF
struct sk_buff *skb = (struct sk_buff *)p;
unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */
if (!p) {
return;
}
if (!bcm_static_skb) {
osl_pktfree(osh, p, send);
return;
}
#ifdef DHD_USE_STATIC_CTRLBUF
spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
if (p == bcm_static_skb->skb_8k[i]) {
if (bcm_static_skb->pkt_use[i] == 0) {
printk("%s: static pkt idx %d(%p) is double free\n",
__FUNCTION__, i, p);
} else {
bcm_static_skb->pkt_use[i] = 0;
}
if (skb->mac_len != PREALLOC_USED_MAGIC) {
printk("%s: static pkt idx %d(%p) is not in used\n",
__FUNCTION__, i, p);
}
skb->mac_len = PREALLOC_FREE_MAGIC;
spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
return;
}
}
spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
#else
down(&bcm_static_skb->osl_pkt_sem);
for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
if (p == bcm_static_skb->skb_4k[i]) {
bcm_static_skb->pkt_use[i] = 0;
up(&bcm_static_skb->osl_pkt_sem);
return;
}
}
for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
bcm_static_skb->pkt_use[i] = 0;
up(&bcm_static_skb->osl_pkt_sem);
return;
}
}
#ifdef ENHANCED_STATIC_BUF
if (p == bcm_static_skb->skb_16k) {
bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
up(&bcm_static_skb->osl_pkt_sem);
return;
}
#endif
up(&bcm_static_skb->osl_pkt_sem);
osl_pktfree(osh, p, send);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
uint val = 0;
uint retry = PCI_CFG_RETRY;
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
/* only 4byte access supported */
ASSERT(size == 4);
do {
pci_read_config_dword(osh->pdev, offset, &val);
if (val != 0xffffffff)
break;
} while (retry--);
return (val);
}
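/*
 * All-ones is what a PCI config read returns when the device does not
 * respond (e.g. master abort), so the loop above retries up to
 * PCI_CFG_RETRY times before giving up and returning 0xffffffff.
 */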
void
osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
uint retry = PCI_CFG_RETRY;
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
/* only 4byte access supported */
ASSERT(size == 4);
do {
pci_write_config_dword(osh->pdev, offset, val);
if (offset != PCI_BAR0_WIN)
break;
if (osl_pci_read_config(osh, offset, size) == val)
break;
} while (retry--);
}
/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
return ((struct pci_dev *)osh->pdev)->bus->number;
#endif
}
/* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif
}
/* return domain # for the pci device pointed by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}
/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
return ((struct pci_dev *)osh->pdev)->bus->number;
}
/* return the pci device pointed by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
return osh->pdev;
}
static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}
void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}
void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}
void *
osl_malloc(osl_t *osh, uint size)
{
void *addr;
gfp_t flags;
/* only ASSERT if osh is defined */
if (osh)
ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
if (bcm_static_buf)
{
unsigned long irq_flags;
int i = 0;
if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
{
spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);
for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
{
if (bcm_static_buf->buf_use[i] == 0)
break;
}
if (i == STATIC_BUF_MAX_NUM)
{
spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
printk("all static buff in use!\n");
goto original;
}
bcm_static_buf->buf_use[i] = 1;
spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
if (osh)
atomic_add(size, &osh->cmn->malloced);
return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
}
}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */
flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
if ((addr = kmalloc(size, flags)) == NULL) {
if (osh)
osh->failed++;
return (NULL);
}
if (osh && osh->cmn)
atomic_add(size, &osh->cmn->malloced);
return (addr);
}
void *
osl_mallocz(osl_t *osh, uint size)
{
void *ptr;
ptr = osl_malloc(osh, size);
if (ptr != NULL) {
bzero(ptr, size);
}
return ptr;
}
void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
unsigned long flags;
if (bcm_static_buf)
{
if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
{
int buf_idx = 0;
buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
bcm_static_buf->buf_use[buf_idx] = 0;
spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);
if (osh && osh->cmn) {
ASSERT(osh->magic == OS_HANDLE_MAGIC);
atomic_sub(size, &osh->cmn->malloced);
}
return;
}
}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
if (osh && osh->cmn) {
ASSERT(osh->magic == OS_HANDLE_MAGIC);
ASSERT(size <= osl_malloced(osh));
atomic_sub(size, &osh->cmn->malloced);
}
kfree(addr);
}
uint
osl_check_memleak(osl_t *osh)
{
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
if (atomic_read(&osh->cmn->refcount) == 1)
return (atomic_read(&osh->cmn->malloced));
else
return 0;
}
uint
osl_malloced(osl_t *osh)
{
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
return (atomic_read(&osh->cmn->malloced));
}
uint
osl_malloc_failed(osl_t *osh)
{
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
return (osh->failed);
}
uint
osl_dma_consistent_align(void)
{
return (PAGE_SIZE);
}
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
void *va;
uint16 align = (1 << align_bits);
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
size += align;
*alloced = size;
#ifndef BCM_SECURE_DMA
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
if (va)
*pap = (ulong)__virt_to_phys((ulong)va);
#else
{
dma_addr_t pap_lin;
struct pci_dev *hwdev = osh->pdev;
gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
flags = GFP_ATOMIC;
#else
flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
*pap = (dmaaddr_t)pap_lin;
}
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
return va;
}
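/*
 * Padding note: the underlying allocators are assumed to return
 * DMA_CONSISTENT_ALIGN-aligned memory. When that base alignment does not
 * already satisfy the requested (1 << align_bits) alignment, the size is
 * padded by one alignment unit so the caller can round the address up
 * within the buffer; *alloced reports the padded size so the matching
 * osl_dma_free_consistent() can pass it back.
 */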
void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
#ifndef BCM_SECURE_DMA
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
kfree(va);
#else
pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
int dir;
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
return (pci_map_single(osh->pdev, va, size, dir));
}
void BCMFASTPATH
osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
{
int dir;
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
}
/* OSL function for CPU relax */
inline void BCMFASTPATH
osl_cpu_relax(void)
{
cpu_relax();
}
#if defined(mips)
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
unsigned long l = ROUNDDN((unsigned long)va, L1_CACHE_BYTES);
unsigned long e = ROUNDUP((unsigned long)(va+size), L1_CACHE_BYTES);
while (l < e)
{
flush_dcache_line(l); /* Hit_Writeback_Inv_D */
l += L1_CACHE_BYTES; /* next cache line base */
}
}
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
unsigned long l = ROUNDDN((unsigned long)va, L1_CACHE_BYTES);
unsigned long e = ROUNDUP((unsigned long)(va+size), L1_CACHE_BYTES);
while (l < e)
{
invalidate_dcache_line(l); /* Hit_Invalidate_D */
l += L1_CACHE_BYTES; /* next cache line base */
}
}
inline void osl_prefetch(const void *ptr)
{
__asm__ __volatile__(".set mips4\npref %0,(%1)\n.set mips0\n"::"i" (Pref_Load), "r" (ptr));
}
#elif (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
if (size > 0)
dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
}
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
}
inline void osl_prefetch(const void *ptr)
{
__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
}
int osl_arch_is_coherent(void)
{
return 0;
}
inline int osl_acp_war_enab(void)
{
return 0;
}
#endif /* mips */
void
osl_delay(uint usec)
{
uint d;
while (usec > 0) {
d = MIN(usec, 1000);
udelay(d);
usec -= d;
}
}
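/*
 * udelay() is only dependable for short intervals on many platforms, so
 * osl_delay() busy-waits in chunks of at most 1000 us.
 */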
void
osl_sleep(uint ms)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
if (ms < 20)
usleep_range(ms*1000, ms*1000 + 1000);
else
#endif
msleep(ms);
}
/* Clone a packet.
* The pkttag contents are NOT cloned.
*/
void *
osl_pktdup(osl_t *osh, void *skb)
{
void * p;
ASSERT(!PKTISCHAINED(skb));
/* clear the CTFBUF flag if set and map the rest of the buffer
* before cloning.
*/
PKTCTFMAP(osh, skb);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#else
if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif
return NULL;
/* Clear PKTC context */
PKTSETCLINK(p, NULL);
PKTCCLRFLAGS(p);
PKTCSETCNT(p, 1);
PKTCSETLEN(p, PKTLEN(osh, skb));
/* skb_clone copies skb->cb; we don't want that */
if (osh->pub.pkttag)
OSL_PKTTAG_CLEAR(p);
/* Increment the packet counter */
atomic_inc(&osh->cmn->pktalloced);
return (p);
}
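/*
 * On 2.6.36+ kernels pskb_copy() is used rather than skb_clone(): it
 * duplicates the header portion of the packet data so the copy is safely
 * writable, whereas skb_clone() shares the data buffer and duplicates only
 * the skb metadata. Either way the copy inherits skb->cb, hence the
 * explicit OSL_PKTTAG_CLEAR() above.
 */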
/*
* OSLREGOPS specifies the use of osl_XXX routines to be used for register access
*/
/*
* BINOSL selects the slightly slower function-call-based binary compatible osl.
*/
uint
osl_pktalloced(osl_t *osh)
{
if (atomic_read(&osh->cmn->refcount) == 1)
return (atomic_read(&osh->cmn->pktalloced));
else
return 0;
}
uint32
osl_rand(void)
{
uint32 rand;
get_random_bytes(&rand, sizeof(rand));
return rand;
}
/* Linux Kernel: File Operations: start */
void *
osl_os_open_image(char *filename)
{
struct file *fp;
fp = filp_open(filename, O_RDONLY, 0);
/*
* 2.6.11 (FC4) supports filp_open() but later revs don't?
* Alternative:
* fp = open_namei(AT_FDCWD, filename, O_RD, 0);
* ???
*/
if (IS_ERR(fp))
fp = NULL;
return fp;
}
int
osl_os_get_image_block(char *buf, int len, void *image)
{
struct file *fp = (struct file *)image;
int rdlen;
if (!image)
return 0;
rdlen = kernel_read(fp, fp->f_pos, buf, len);
if (rdlen > 0)
fp->f_pos += rdlen;
return rdlen;
}
void
osl_os_close_image(void *image)
{
if (image)
filp_close((struct file *)image, NULL);
}
int
osl_os_image_size(void *image)
{
int len = 0, curroffset;
if (image) {
/* store the current offset */
curroffset = generic_file_llseek(image, 0, 1);
/* goto end of file to get length */
len = generic_file_llseek(image, 0, 2);
/* restore back the offset */
generic_file_llseek(image, curroffset, 0);
}
return len;
}
/* Linux Kernel: File Operations: end */
#if (defined(STB) && defined(__arm__))
inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
{
unsigned long flags = 0;
int pci_access = 0;
if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
pci_access = 1;
if (pci_access && ACP_WAR_ENAB())
spin_lock_irqsave(&l2x0_reg_lock, flags);
switch (size) {
case sizeof(uint8):
*(uint8*)v = readb((volatile uint8*)(addr));
break;
case sizeof(uint16):
*(uint16*)v = readw((volatile uint16*)(addr));
break;
case sizeof(uint32):
*(uint32*)v = readl((volatile uint32*)(addr));
break;
case sizeof(uint64):
*(uint64*)v = *((volatile uint64*)(addr));
break;
}
if (pci_access && ACP_WAR_ENAB())
spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
#endif
/* APIs to set/get specific quirks in OSL layer */
void
osl_flag_set(osl_t *osh, uint32 mask)
{
osh->flags |= mask;
}
#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
return (osh->flags & mask);
}
#ifdef BCM_SECURE_DMA
static void *
osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
{
struct page **map;
int order, i;
void *addr = NULL;
size = PAGE_ALIGN(size);
order = get_order(size);
map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
if (map == NULL)
return NULL;
for (i = 0; i < (size >> PAGE_SHIFT); i++)
map[i] = page + i;
if (iscache) {
addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
if (isdecr) {
osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page));
}
}
else {
#if defined(__ARM_ARCH_7A__)
addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
pgprot_noncached(__pgprot(PAGE_KERNEL)));
#endif
if (isdecr) {
osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page));
}
}
kfree(map);
return (void *)addr;
}
static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
vunmap(contig_base_va);
}
static int
osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
{
int i;
int ret = BCME_OK;
sec_mem_elem_t *sec_mem_elem;
if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {
*list = sec_mem_elem;
bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
for (i = 0; i < max-1; i++) {
sec_mem_elem->next = (sec_mem_elem + 1);
sec_mem_elem->size = mbsize;
sec_mem_elem->pa_cma = osh->contig_base_alloc;
sec_mem_elem->vac = osh->contig_base_alloc_va;
sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
osh->contig_base_alloc += mbsize;
osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);
sec_mem_elem = sec_mem_elem + 1;
}
sec_mem_elem->next = NULL;
sec_mem_elem->size = mbsize;
sec_mem_elem->pa_cma = osh->contig_base_alloc;
sec_mem_elem->vac = osh->contig_base_alloc_va;
sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
osh->contig_base_alloc += mbsize;
osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);
} else {
printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
ret = BCME_ERROR;
}
return ret;
}
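/*
 * The loop above carves 'max' blocks of 'mbsize' bytes out of the
 * contiguous secure region, advancing contig_base_alloc(_va) past each
 * block and chaining the descriptors into a singly linked free list.
 * osl_attach() uses it to build the 4 KB pool:
 *
 *	osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM,
 *		&osh->sec_list_4096);
 */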
static void
osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
{
if (sec_list_base)
kfree(sec_list_base);
}
static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
struct sec_cma_info *ptr_cma_info, uint offset)
{
sec_mem_elem_t *sec_mem_elem = NULL;
ASSERT(osh->sec_list_4096);
sec_mem_elem = osh->sec_list_4096;
osh->sec_list_4096 = sec_mem_elem->next;
sec_mem_elem->next = NULL;
if (ptr_cma_info->sec_alloc_list_tail) {
ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
}
else {
/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
ASSERT(ptr_cma_info->sec_alloc_list == NULL);
ptr_cma_info->sec_alloc_list = sec_mem_elem;
ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
}
return sec_mem_elem;
}
static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
{
sec_mem_elem->dma_handle = 0x0;
sec_mem_elem->va = NULL;
sec_mem_elem->next = osh->sec_list_4096;
osh->sec_list_4096 = sec_mem_elem;
}
static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;
if (!sec_mem_elem) {
printk("osl_sec_dma_find_rem_elem ptr_cma_info->sec_alloc_list is NULL \n");
return NULL;
}
if (sec_mem_elem->dma_handle == dma_handle) {
ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
ptr_cma_info->sec_alloc_list_tail = NULL;
ASSERT(ptr_cma_info->sec_alloc_list == NULL);
}
return sec_mem_elem;
}
sec_mem_elem = sec_mem_elem->next;
while (sec_mem_elem != NULL) {
if (sec_mem_elem->dma_handle == dma_handle) {
sec_prv_elem->next = sec_mem_elem->next;
if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
return sec_mem_elem;
}
sec_prv_elem = sec_mem_elem;
sec_mem_elem = sec_mem_elem->next;
}
return NULL;
}
static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
if (sec_mem_elem) {
ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
if (ptr_cma_info->sec_alloc_list == NULL)
ptr_cma_info->sec_alloc_list_tail = NULL;
return sec_mem_elem;
} else
return NULL;
}
static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
return ptr_cma_info->sec_alloc_list_tail;
}
dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
sec_mem_elem_t *sec_mem_elem;
struct page *pa_cma_page;
uint loffset;
void *vaorig = ((uint8 *)va + size);
dma_addr_t dma_handle = 0x0;
/* packet will be the one added with osl_sec_dma_map() just before this call */
sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);
if (sec_mem_elem && sec_mem_elem->va == vaorig) {
pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
} else {
printf("%s: error orig va not found va = 0x%p \n",
__FUNCTION__, vaorig);
}
return dma_handle;
}
dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
{
sec_mem_elem_t *sec_mem_elem;
struct page *pa_cma_page;
void *pa_cma_kmap_va = NULL;
uint buflen = 0;
dma_addr_t dma_handle = 0x0;
uint loffset;
ASSERT((direction == DMA_RX) || (direction == DMA_TX));
sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
sec_mem_elem->va = va;
sec_mem_elem->direction = direction;
pa_cma_page = sec_mem_elem->pa_cma_page;
loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
* pa_cma_kmap_va += loffset;
*/
pa_cma_kmap_va = sec_mem_elem->vac;
pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
buflen = size;
if (direction == DMA_TX) {
/* 'offset' was already applied to pa_cma_kmap_va above; adding it
 * again here would copy past the region the device is mapped at
 */
memcpy(pa_cma_kmap_va, va, size);
if (dmah) {
dmah->nsegs = 1;
dmah->origsize = buflen;
}
}
else
{
if ((p != NULL) && (dmah != NULL)) {
dmah->nsegs = 1;
dmah->origsize = buflen;
}
}
dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
if (dmah) {
dmah->segs[0].addr = dma_handle;
dmah->segs[0].length = buflen;
}
sec_mem_elem->dma_handle = dma_handle;
/* kunmap_atomic(pa_cma_kmap_va-loffset); */
return dma_handle;
}
dma_addr_t BCMFASTPATH
osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
{
struct page *pa_cma_page;
phys_addr_t pa_cma;
dma_addr_t dma_handle = 0x0;
uint loffset;
pa_cma = (phys_addr_t)(va - osh->contig_delta_va_pa);
pa_cma_page = phys_to_page(pa_cma);
loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));
dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
return dma_handle;
}
void BCMFASTPATH
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
sec_mem_elem_t *sec_mem_elem;
struct page *pa_cma_page;
void *pa_cma_kmap_va = NULL;
uint buflen = 0;
dma_addr_t pa_cma;
void *va;
int read_count = 0;
BCM_REFERENCE(buflen);
BCM_REFERENCE(read_count);
sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
ASSERT(sec_mem_elem);
va = sec_mem_elem->va;
va = (uint8 *)va - offset;
pa_cma = sec_mem_elem->pa_cma;
pa_cma_page = sec_mem_elem->pa_cma_page;
if (direction == DMA_RX) {
if (p == NULL) {
/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
* pa_cma_kmap_va += loffset;
*/
pa_cma_kmap_va = sec_mem_elem->vac;
dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
memcpy(va, pa_cma_kmap_va, size);
/* kunmap_atomic(pa_cma_kmap_va); */
}
} else {
dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
}
osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}
void
osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
{
sec_mem_elem_t *sec_mem_elem;
sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
while (sec_mem_elem != NULL) {
dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
}
}
static void
osl_sec_dma_init_consistent(osl_t *osh)
{
int i;
void *temp_va = osh->contig_base_alloc_coherent_va;
phys_addr_t temp_pa = osh->contig_base_alloc_coherent;
for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
osh->sec_cma_coherent[i].avail = TRUE;
osh->sec_cma_coherent[i].va = temp_va;
osh->sec_cma_coherent[i].pa = temp_pa;
temp_va += SEC_CMA_COHERENT_BLK;
temp_pa += SEC_CMA_COHERENT_BLK;
}
}
static void *
osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
{
void *temp_va = NULL;
ulong temp_pa = 0;
int i;
if (size > SEC_CMA_COHERENT_BLK) {
printf("%s unsupported size\n", __FUNCTION__);
return NULL;
}
for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
if (osh->sec_cma_coherent[i].avail == TRUE) {
temp_va = osh->sec_cma_coherent[i].va;
temp_pa = osh->sec_cma_coherent[i].pa;
osh->sec_cma_coherent[i].avail = FALSE;
break;
}
}
if (i == SEC_CMA_COHERENT_MAX)
printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
temp_va, (ulong)temp_pa, size);
*pap = (unsigned long)temp_pa;
return temp_va;
}
static void
osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
int i = 0;
for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
if (osh->sec_cma_coherent[i].va == va) {
osh->sec_cma_coherent[i].avail = TRUE;
break;
}
}
if (i == SEC_CMA_COHERENT_MAX)
printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
va, (ulong)pa, size);
}
#endif /* BCM_SECURE_DMA */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
#include <linux/kallsyms.h>
#include <net/sock.h>
void
osl_pkt_orphan_partial(struct sk_buff *skb)
{
uint32 fraction;
static void *p_tcp_wfree = NULL;
if (!skb->destructor || skb->destructor == sock_wfree)
return;
if (unlikely(!p_tcp_wfree)) {
char sym[KSYM_SYMBOL_LEN];
sprint_symbol(sym, (unsigned long)skb->destructor);
sym[9] = 0;
if (!strcmp(sym, "tcp_wfree"))
p_tcp_wfree = skb->destructor;
else
return;
}
if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
return;
/* Subtract a portion of the skb truesize from the socket's
 * sk_wmem_alloc so that more skbs can be allocated for this
 * socket, giving a better cushion to meet the WiFi device's
 * queueing requirement.
 */
fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
skb->truesize -= fraction;
atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
}
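/*
 * This is a workaround for TCP Small Queues (TSQ, Linux 3.6+), which caps
 * per-socket bytes in flight via sk_wmem_alloc and would otherwise throttle
 * TCP over the slow-to-complete WiFi path. tcp_wfree() is not exported, so
 * its address is recovered once by matching the first nine characters of
 * the destructor's symbol name ("tcp_wfree").
 */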
#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */