/*
* Broadcom Dongle Host Driver (DHD), Linux-specific network interface
* Basically selected code segments from usb-cdc.c and usb-rndis.c
*
* Copyright (C) 1999-2014, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: dhd_linux.c 477711 2014-05-14 08:45:17Z $
*/
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <proto/ethernet.h>
#include <proto/bcmevent.h>
#include <proto/vlan.h>
#include <proto/bcmudp.h>
#include <proto/bcmdhcp.h>
#ifdef DHD_L2_FILTER
#include <proto/bcmicmp.h>
#endif
#include <proto/802.3.h>
#include <dngl_stats.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#ifdef DHD_WMF
#include <dhd_wmf_linux.h>
#endif /* DHD_WMF */
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#ifdef WLMEDIA_HTSF
#include <linux/time.h>
#include <htsf.h>
#define HTSF_MINLEN 200 /* min. packet length to timestamp */
#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
#define TSMAX 1000 /* max no. of timing records kept */
#define NUMBIN 34
static uint32 tsidx = 0;
static uint32 htsf_seqnum = 0;
uint32 tsfsync;
struct timeval tsync;
static uint32 tsport = 5010;
typedef struct histo_ {
uint32 bin[NUMBIN];
} histo_t;
#if !ISPOWEROF2(DHD_SDALIGN)
#error DHD_SDALIGN is not a power of 2!
#endif
static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
#endif /* WLMEDIA_HTSF */
#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif
#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
#endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif
#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */
/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1
#ifdef BCM_FD_AGGR
#include <bcm_rpc.h>
#include <bcm_rpc_tp.h>
#endif
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif
#include <wl_android.h>
/* Maximum STA per radio */
#define DHD_MAX_STA 32
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
.notifier_call = dhd_inetaddr_notifier_call
};
/* Guard against registering the same notifier twice; doing so would create a loop
* in the kernel notifier linked list (with 'next' pointing to itself)
*/
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef CONFIG_IPV6
static int dhd_inet6addr_notifier_call(struct notifier_block *this,
unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
.notifier_call = dhd_inet6addr_notifier_call
};
/* Guard against registering the same notifier twice; doing so would create a loop
* in the kernel notifier linked list (with 'next' pointing to itself)
*/
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
#if defined(OOB_INTR_ONLY)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL v2");
#endif /* LinuxVer */
#include <dhd_bus.h>
#ifdef BCM_FD_AGGR
#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
#else
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif
#endif /* BCM_FD_AGGR */
#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
const char *
print_tainted()
{
return "";
}
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#ifdef PKT_FILTER_SUPPORT
extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
#endif
#ifdef READ_MACADDR
extern int dhd_read_macaddr(struct dhd_info *dhd);
#else
static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
#endif
#ifdef WRITE_MACADDR
extern int dhd_write_macaddr(struct ether_addr *mac);
#else
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif
static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
.notifier_call = dhd_reboot_callback,
.priority = 1,
};
typedef struct dhd_dump {
uint8 *buf;
int bufsize;
} dhd_dump_t;
typedef struct dhd_if_event {
struct list_head list;
wl_event_data_if_t event;
char name[IFNAMSIZ+1];
uint8 mac[ETHER_ADDR_LEN];
} dhd_if_event_t;
/* Interface control information */
typedef struct dhd_if {
struct dhd_info *info; /* back pointer to dhd_info */
/* OS/stack specifics */
struct net_device *net;
int idx; /* iface idx in dongle */
uint subunit; /* subunit */
uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
bool set_macaddress;
bool set_multicast;
uint8 bssidx; /* bsscfg index for the interface */
bool attached; /* Delayed attachment when unset */
bool txflowcontrol; /* Per interface flow control indicator */
char name[IFNAMSIZ+1]; /* linux interface name */
struct net_device_stats stats;
#ifdef DHD_WMF
dhd_wmf_t wmf; /* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
struct list_head sta_list; /* sll of associated stations */
#if !defined(BCM_GMAC3)
spinlock_t sta_list_lock; /* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
uint32 ap_isolate; /* ap-isolation settings */
} dhd_if_t;
#ifdef WLMEDIA_HTSF
typedef struct {
uint32 low;
uint32 high;
} tsf_t;
typedef struct {
uint32 last_cycle;
uint32 last_sec;
uint32 last_tsf;
uint32 coef; /* scaling factor */
uint32 coefdec1; /* first decimal */
uint32 coefdec2; /* second decimal */
} htsf_t;
typedef struct {
uint32 t1;
uint32 t2;
uint32 t3;
uint32 t4;
} tstamp_t;
static tstamp_t ts[TSMAX];
static tstamp_t maxdelayts;
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
#endif /* WLMEDIA_HTSF */
struct ipv6_work_info_t {
uint8 if_idx;
char ipv6_addr[16];
unsigned long event;
};
/* When Perimeter locks are deployed, any blocking calls must be preceded
* by a PERIM UNLOCK and followed by a PERIM LOCK.
* Examples of blocking calls are: schedule_timeout(), down_interruptible(),
* wait_event_timeout().
*/
/* Local private structure (extension of pub) */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
wl_iw_t iw; /* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
dhd_pub_t pub;
dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
void *adapter; /* adapter information, interrupt, fw path etc. */
char fw_path[PATH_MAX]; /* path to firmware image */
char nv_path[PATH_MAX]; /* path to nvram vars file */
struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
spinlock_t wlfc_spinlock;
#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
htsf_t htsf;
#endif
wait_queue_head_t ioctl_resp_wait;
wait_queue_head_t d3ack_wait;
uint32 default_wd_interval;
struct timer_list timer;
bool wd_timer_valid;
struct tasklet_struct tasklet;
spinlock_t sdlock;
spinlock_t txqlock;
spinlock_t dhd_lock;
struct mutex sdmutex;
tsk_ctl_t thr_dpc_ctl;
tsk_ctl_t thr_wdt_ctl;
tsk_ctl_t thr_rxf_ctl;
spinlock_t rxf_lock;
bool rxthread_enabled;
/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
struct wake_lock wl_wifi; /* Wifi wakelock */
struct wake_lock wl_rxwake; /* Wifi rx wakelock */
struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
struct wake_lock wl_wdwake; /* Wifi wd wakelock */
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
/* net_device interface lock, prevent race conditions among net_dev interface
* calls and wifi_on or wifi_off
*/
struct mutex dhd_net_if_mutex;
struct mutex dhd_suspend_mutex;
#endif
spinlock_t wakelock_spinlock;
uint32 wakelock_counter;
int wakelock_wd_counter;
int wakelock_rx_timeout_enable;
int wakelock_ctrl_timeout_enable;
bool waive_wakelock;
uint32 wakelock_before_waive;
/* Thread to issue ioctl for multicast */
wait_queue_head_t ctrl_wait;
atomic_t pend_8021x_cnt;
dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
#ifdef ARP_OFFLOAD_SUPPORT
u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
void *rpc_th;
void *rpc_osh;
struct timer_list rpcth_timer;
bool rpcth_timer_active;
bool fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
void *dhd_deferred_wq;
#ifdef DEBUG_CPU_FREQ
struct notifier_block freq_trans;
int __percpu *new_freq;
#endif
unsigned int unit;
struct notifier_block pm_notifier;
#ifdef SAR_SUPPORT
struct notifier_block sar_notifier;
s32 sar_enable;
#endif
} dhd_info_t;
#define DHDIF_FWDER(dhdif) FALSE
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;
/* Definitions to provide paths to the firmware and nvram
* example: nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
*/
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];
/* information string to keep firmware, chip, and chip revision version info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC) */
/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#ifdef CONFIG_IPV6
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
/* Error bits */
module_param(dhd_msg_level, int, 0);
#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);
/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */
/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)
uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#endif /* defined(DHD_DEBUG) */
uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);
#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif
/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);
/* Pkt filter mode control */
uint dhd_master_mode = TRUE;
module_param(dhd_master_mode, uint, 0);
int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);
/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);
/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);
#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* BCMDHDUSB */
/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);
/* The DHD Perimeter lock is only used in routers with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
#ifdef PCIE_FULL_DONGLE
#if defined(BCM_GMAC3)
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
#else /* ! BCM_GMAC3 */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
/* Control fw roaming */
uint dhd_roam_disable = 0;
/* Control radio state */
uint dhd_radio_up = 1;
/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
/* The following are specific to the SDIO dongle */
/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);
/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);
/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);
/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);
#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);
/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);
#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif /* BCMDBGFS */
#endif /* BCMSDIO */
#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);
/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */
extern char dhd_version[];
int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);
#ifdef WLMEDIA_HTSF
void htsf_update(dhd_info_t *dhd, void *data);
tsf_t prev_tsf, cur_tsf;
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
static void dhd_dump_latency(void);
static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_dump_htsfhisto(histo_t *his, char *s);
#endif /* WLMEDIA_HTSF */
/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);
#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */
static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);
#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */
static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
size_t pktlen, wl_event_msg_t *event_ptr, void **data_ptr);
#ifdef DHD_UNICAST_DHCP
static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
int *len_ptr, uint8 *prot_ptr);
static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
#endif /* DHD_UNICAST_DHCP */
#ifdef DHD_L2_FILTER
static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
#endif
#if defined(CONFIG_PM_SLEEP)
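/* PM notifier callback: tracks kernel suspend/resume transitions. On suspend it
* pauses wlfc (when PROP_TXSTATUS and SUPPORT_P2P_GO_PS are enabled) and, on
* 2.6.27..2.6.39 kernels, latches dhd_mmc_suspend for the bus code.
*/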
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
int ret = NOTIFY_DONE;
bool suspend = FALSE;
dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
BCM_REFERENCE(dhdinfo);
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
suspend = TRUE;
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
suspend = FALSE;
break;
}
#if defined(SUPPORT_P2P_GO_PS)
#ifdef PROP_TXSTATUS
if (suspend) {
DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
dhd_wlfc_suspend(&dhdinfo->pub);
DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
} else
dhd_wlfc_resume(&dhdinfo->pub);
#endif
#endif /* defined(SUPPORT_P2P_GO_PS) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
KERNEL_VERSION(2, 6, 39))
dhd_mmc_suspend = suspend;
smp_mb();
#endif
return ret;
}
static struct notifier_block dhd_pm_notifier = {
.notifier_call = dhd_pm_callback,
.priority = 10
};
/* Guard against registering the same notifier twice; doing so would create a loop
* in the kernel notifier linked list (with 'next' pointing to itself)
*/
static bool dhd_pm_notifier_registered = FALSE;
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */
/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);
/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
int ifidx; /* interface index */
} dhd_dev_priv_t;
#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#if defined(DHD_OF_SUPPORT)
extern int dhd_wlan_init(void);
extern void dhd_wlan_exit(void);
#endif /* defined(DHD_OF_SUPPORT) */
/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
dhd_dev_priv_t * dev_priv;
ASSERT(dev != (struct net_device *)NULL);
dev_priv = DHD_DEV_PRIV(dev);
dev_priv->dhd = (dhd_info_t *)NULL;
dev_priv->ifp = (dhd_if_t *)NULL;
dev_priv->ifidx = DHD_BAD_IF;
}
/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
int ifidx)
{
dhd_dev_priv_t * dev_priv;
ASSERT(dev != (struct net_device *)NULL);
dev_priv = DHD_DEV_PRIV(dev);
dev_priv->dhd = dhd;
dev_priv->ifp = ifp;
dev_priv->ifidx = ifidx;
}
#ifdef SAR_SUPPORT
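/* SAR notifier callback: when 'data' carries a TX power value (qdBm) it overrides
* TX power via the "qtxpower" iovar; otherwise 'action' toggles the "sar_enable"
* iovar in the firmware.
*/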
static int dhd_sar_callback(struct notifier_block *nfb, unsigned long action, void *data)
{
dhd_info_t *dhd = (dhd_info_t*)container_of(nfb, struct dhd_info, sar_notifier);
char iovbuf[32];
s32 sar_enable;
s32 txpower;
int ret;
if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.up == 0) {
DHD_ERROR(("%s Not ready, Bus state %d firmware state %d\n",
__FUNCTION__, dhd->pub.busstate, dhd->pub.up));
return NOTIFY_BAD;
}
if (data) {
/* if data != NULL then we expect that the notifier passed
* the exact value of max tx power in quarters of dB.
* qtxpower variable allows us to overwrite TX power.
*/
txpower = *(s32*)data;
if (txpower == -1 || txpower >= 127)
txpower = 127; /* Max val of 127 qdbm */
else
txpower |= WL_TXPWR_OVERRIDE;
txpower = htod32(txpower);
bcm_mkiovar("qtxpower", (char *)&txpower, 4, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR,
iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
DHD_ERROR(("%s wl qtxpower failed %d\n", __FUNCTION__, ret));
} else {
/* '1' means activate sarlimit and '0' means back to normal
* state (deactivate sarlimit)
*/
sar_enable = action ? 1 : 0;
if (dhd->sar_enable == sar_enable)
return NOTIFY_DONE;
bcm_mkiovar("sar_enable", (char *)&sar_enable, 4, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
DHD_ERROR(("%s wl sar_enable %d failed %d\n", __FUNCTION__, sar_enable, ret));
else
dhd->sar_enable = sar_enable;
}
return NOTIFY_DONE;
}
static bool dhd_sar_notifier_registered = FALSE;
extern int register_notifier_by_sar(struct notifier_block *nb);
extern int unregister_notifier_by_sar(struct notifier_block *nb);
#endif
#ifdef PCIE_FULL_DONGLE
/** Dummy objects are defined with state representing bad|down.
* Performance gains come from reducing branch conditionals and load shadows, and
* from better instruction parallelism, dual issue, and availability of larger
* pipelines.
* Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
* is accessed via the dhd_sta_t.
*/
/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
#if defined(BCM_GMAC3)
.fwdh = FWDER_NULL,
#endif
.pub = {
.info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
.up = FALSE, .busstate = DHD_BUS_DOWN
}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)
/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#if defined(BCM_GMAC3)
.fwdh = FWDER_NULL,
#endif
#ifdef WMF
.wmf = { .wmf_enable = TRUE },
#endif
.info = DHD_INFO_NULL,
.net = DHD_NET_DEV_NULL,
.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)
#define DHD_STA_NULL ((dhd_sta_t *)NULL)
/** Interface STA list management. */
/** Fetch the dhd_if object, given the interface index in the dhd. */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
/** Alloc/Free a dhd_sta object from the dhd instance's sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);
/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
/* Return interface pointer */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
ASSERT(ifidx < DHD_MAX_IFS);
if (ifidx >= DHD_MAX_IFS) {
return NULL;
}
return dhdp->info->iflist[ifidx];
}
/** Reset a dhd_sta object and free into the dhd pool. */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
int prio;
ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
id16_map_free(dhdp->staid_allocator, sta->idx);
for (prio = 0; prio < (int)NUMPRIO; prio++)
sta->flowid[prio] = FLOWID_INVALID;
sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
sta->ifidx = DHD_BAD_IF;
bzero(sta->ea.octet, ETHER_ADDR_LEN);
INIT_LIST_HEAD(&sta->list);
sta->idx = ID16_INVALID; /* implying free */
}
/** Allocate a dhd_sta object from the dhd pool. */
static dhd_sta_t *
dhd_sta_alloc(dhd_pub_t * dhdp)
{
uint16 idx;
dhd_sta_t * sta;
dhd_sta_pool_t * sta_pool;
ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
idx = id16_map_alloc(dhdp->staid_allocator);
if (idx == ID16_INVALID) {
DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
return DHD_STA_NULL;
}
sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
sta = &sta_pool[idx];
ASSERT((sta->idx == ID16_INVALID) &&
(sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
sta->idx = idx; /* implying allocated */
return sta;
}
/** Delete all STAs in an interface's STA list. */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
dhd_sta_t *sta, *next;
unsigned long flags;
DHD_IF_STA_LIST_LOCK(ifp, flags);
list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
#if defined(BCM_GMAC3)
if (ifp->fwdh) {
/* Remove sta from WOFA forwarder. */
fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
}
#endif /* BCM_GMAC3 */
list_del(&sta->list);
dhd_sta_free(&ifp->info->pub, sta);
}
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
return;
}
/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
#if defined(BCM_GMAC3)
if (ifp && (ifp->fwdh != FWDER_NULL)) {
dhd_sta_t *sta, *next;
unsigned long flags;
DHD_IF_STA_LIST_LOCK(ifp, flags);
list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
/* Remove any sta entry from WOFA forwarder. */
fwder_flush(ifp->fwdh, (wofa_t)sta);
}
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
}
#endif /* BCM_GMAC3 */
}
/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
int idx, sta_pool_memsz;
dhd_sta_t * sta;
dhd_sta_pool_t * sta_pool;
void * staid_allocator;
ASSERT(dhdp != (dhd_pub_t *)NULL);
ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
if (staid_allocator == NULL) {
DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
return BCME_ERROR;
}
/* Pre-allocate a pool of dhd_sta objects (one extra). */
sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
if (sta_pool == NULL) {
DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
id16_map_fini(dhdp->osh, staid_allocator);
return BCME_ERROR;
}
dhdp->sta_pool = sta_pool;
dhdp->staid_allocator = staid_allocator;
/* Initialize all sta(s) for the pre-allocated free pool. */
bzero((uchar *)sta_pool, sta_pool_memsz);
for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
sta = &sta_pool[idx];
sta->idx = id16_map_alloc(staid_allocator);
ASSERT(sta->idx <= max_sta);
}
/* Now place them into the pre-allocated free pool. */
for (idx = 1; idx <= max_sta; idx++) {
sta = &sta_pool[idx];
dhd_sta_free(dhdp, sta);
}
return BCME_OK;
}
/** Destruct the pool of dhd_sta_t objects.
* Caller must ensure that no STA objects are currently associated with an if.
*/
static void
dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
{
dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
if (sta_pool) {
int idx;
int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
for (idx = 1; idx <= max_sta; idx++) {
ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
ASSERT(sta_pool[idx].idx == ID16_INVALID);
}
MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
dhdp->sta_pool = NULL;
}
id16_map_fini(dhdp->osh, dhdp->staid_allocator);
dhdp->staid_allocator = NULL;
}
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
int idx, sta_pool_memsz;
dhd_sta_t * sta;
dhd_sta_pool_t * sta_pool;
void *staid_allocator;
if (!dhdp) {
DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
return;
}
sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
staid_allocator = dhdp->staid_allocator;
if (!sta_pool) {
DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
return;
}
if (!staid_allocator) {
DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
return;
}
/* clear free pool */
sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
bzero((uchar *)sta_pool, sta_pool_memsz);
/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
id16_map_clear(staid_allocator, max_sta, 1);
/* Initialize all sta(s) for the pre-allocated free pool. */
for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
sta = &sta_pool[idx];
sta->idx = id16_map_alloc(staid_allocator);
ASSERT(sta->idx <= max_sta);
}
/* Now place them into the pre-allocated free pool. */
for (idx = 1; idx <= max_sta; idx++) {
sta = &sta_pool[idx];
dhd_sta_free(dhdp, sta);
}
}
/** Find STA with MAC address ea in an interface's STA list. */
dhd_sta_t *
dhd_find_sta(void *pub, int ifidx, void *ea)
{
dhd_sta_t *sta, *next;
dhd_if_t *ifp;
unsigned long flags;
ASSERT(ea != NULL);
ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
if (ifp == NULL)
return DHD_STA_NULL;
DHD_IF_STA_LIST_LOCK(ifp, flags);
list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
return sta;
}
}
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
return DHD_STA_NULL;
}
/** Add STA into the interface's STA list. */
dhd_sta_t *
dhd_add_sta(void *pub, int ifidx, void *ea)
{
dhd_sta_t *sta;
dhd_if_t *ifp;
unsigned long flags;
ASSERT(ea != NULL);
ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
if (ifp == NULL)
return DHD_STA_NULL;
sta = dhd_sta_alloc((dhd_pub_t *)pub);
if (sta == DHD_STA_NULL) {
DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
return DHD_STA_NULL;
}
memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
/* link the sta and the dhd interface */
sta->ifp = ifp;
sta->ifidx = ifidx;
INIT_LIST_HEAD(&sta->list);
DHD_IF_STA_LIST_LOCK(ifp, flags);
list_add_tail(&sta->list, &ifp->sta_list);
#if defined(BCM_GMAC3)
if (ifp->fwdh) {
ASSERT(ISALIGNED(ea, 2));
/* Add sta to WOFA forwarder. */
fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
}
#endif /* BCM_GMAC3 */
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
return sta;
}
/** Delete STA from the interface's STA list. */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
dhd_sta_t *sta, *next;
dhd_if_t *ifp;
unsigned long flags;
ASSERT(ea != NULL);
ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
if (ifp == NULL)
return;
DHD_IF_STA_LIST_LOCK(ifp, flags);
list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
#if defined(BCM_GMAC3)
if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
ASSERT(ISALIGNED(ea, 2));
fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
}
#endif /* BCM_GMAC3 */
list_del(&sta->list);
dhd_sta_free(&ifp->info->pub, sta);
}
}
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
return;
}
/** Add STA if it doesn't exist. Not reentrant. */
dhd_sta_t*
dhd_findadd_sta(void *pub, int ifidx, void *ea)
{
dhd_sta_t *sta;
sta = dhd_find_sta(pub, ifidx, ea);
if (!sta) {
/* Add entry */
sta = dhd_add_sta(pub, ifidx, ea);
}
return sta;
}
#else
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
#endif /* PCIE_FULL_DONGLE */
/* Returns the dhd iflist index corresponding to the bssidx provided by apps */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
{
dhd_if_t *ifp;
dhd_info_t *dhd = dhdp->info;
int i;
ASSERT(bssidx < DHD_MAX_IFS);
ASSERT(dhdp);
for (i = 0; i < DHD_MAX_IFS; i++) {
ifp = dhd->iflist[i];
if (ifp && (ifp->bssidx == bssidx)) {
DHD_TRACE(("Index manipulated for %s from %d to %d\n",
ifp->name, bssidx, i));
break;
}
}
return i;
}
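/* Enqueue an rx skb into the fixed-size circular skbbuf[] ring at store_idx.
* Returns BCME_BUSY (or BCME_ERROR) if the slot has not yet been consumed by
* the rx frame thread.
*/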
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
uint32 store_idx;
uint32 sent_idx;
if (!skb) {
DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
return BCME_ERROR;
}
dhd_os_rxflock(dhdp);
store_idx = dhdp->store_idx;
sent_idx = dhdp->sent_idx;
if (dhdp->skbbuf[store_idx] != NULL) {
/* Make sure the previous packets are processed */
dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
skb, store_idx, sent_idx));
return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
skb, store_idx, sent_idx));
/* removed msleep here, should use wait_event_timeout if we
* want to give rx frame thread a chance to run
*/
#if defined(WAIT_DEQUEUE)
OSL_SLEEP(1);
#endif
return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
}
DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
dhdp->skbbuf[store_idx] = skb;
dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
dhd_os_rxfunlock(dhdp);
return BCME_OK;
}
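/* Dequeue the next rx skb from skbbuf[] at sent_idx for the rx frame thread;
* returns NULL when the ring is empty.
*/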
static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
uint32 store_idx;
uint32 sent_idx;
void *skb;
dhd_os_rxflock(dhdp);
store_idx = dhdp->store_idx;
sent_idx = dhdp->sent_idx;
skb = dhdp->skbbuf[sent_idx];
if (skb == NULL) {
dhd_os_rxfunlock(dhdp);
DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
store_idx, sent_idx));
return NULL;
}
dhdp->skbbuf[sent_idx] = NULL;
dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
skb, sent_idx));
dhd_os_rxfunlock(dhdp);
return skb;
}
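/* Pre/post processing hook for the platform MAC address: dhd_read_macaddr() on
* the pre pass, dhd_write_macaddr() on the post pass.
*/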
int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
if (prepost) { /* pre process */
dhd_read_macaddr(dhd);
} else { /* post process */
dhd_write_macaddr(&dhd->pub.mac);
}
return 0;
}
#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
static bool
_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
{
bool _apply = FALSE;
/* In case of IBSS mode, apply arp pkt filter */
if (op_mode & DHD_FLAG_IBSS_MODE) {
_apply = TRUE;
goto exit;
}
/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
if ((dhd->arp_version == 1) &&
(op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
_apply = TRUE;
goto exit;
}
exit:
return _apply;
}
#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
void dhd_set_packet_filter(dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
int i;
DHD_TRACE(("%s: enter\n", __FUNCTION__));
if (dhd_pkt_filter_enable) {
for (i = 0; i < dhd->pktfilter_count; i++) {
dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
}
}
#endif /* PKT_FILTER_SUPPORT */
}
void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
int i;
DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
/* 1 - Enable packet filter, only allow unicast packet to send up */
/* 0 - Disable packet filter */
if (dhd_pkt_filter_enable && (!value ||
(dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
{
for (i = 0; i < dhd->pktfilter_count; i++) {
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
if (value && (i == DHD_ARP_FILTER_NUM) &&
!_turn_on_arp_filter(dhd, dhd->op_mode)) {
DHD_TRACE(("Do not turn on ARP white list pkt filter:"
"val %d, cnt %d, op_mode 0x%x\n",
value, i, dhd->op_mode));
continue;
}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
value, dhd_master_mode);
}
}
#endif /* PKT_FILTER_SUPPORT */
}
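/* Apply (value = 1) or remove (value = 0) the extra power-saving settings tied to
* kernel suspend: PM mode, packet filters, bcn_li_dtim/lpas/bcn_to_dly/bcn_timeout,
* roam_off and the IPv6 RA filter, plus log suppression.
*/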
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
#ifndef SUPPORT_PM2_ONLY
int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
/* wl_pkt_filter_enable_t enable_parm; */
char iovbuf[32];
int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
#ifndef ENABLE_FW_ROAM_SUSPEND
uint roamvar = 1;
#endif /* ENABLE_FW_ROAM_SUSPEND */
uint nd_ra_filter = 0;
int lpas = 0;
int dtim_period = 0;
int bcn_interval = 0;
int bcn_to_dly = 0;
int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
int ret = 0;
if (!dhd)
return -ENODEV;
DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
__FUNCTION__, value, dhd->in_suspend));
dhd_suspend_lock(dhd);
#ifdef CUSTOM_SET_CPUCORE
DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
/* set specific cpucore */
dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */
if (dhd->up) {
if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
dhd->early_suspended = 1;
#endif
/* Kernel suspended */
DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
#ifdef CUSTOM_SET_SHORT_DWELL_TIME
dhd_set_short_dwell_time(dhd, TRUE);
#endif
#ifndef SUPPORT_PM2_ONLY
dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
/* Enable packet filter, only allow unicast packet to send up */
dhd_enable_packet_filter(1, dhd);
/* If DTIM skip is set up as default, force it to wake
* each third DTIM for better power savings. Note that
* one side effect is a chance to miss BC/MC packet.
*/
bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period, &bcn_interval);
dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, sizeof(bcn_li_dtim), 1);
if (bcn_li_dtim * dtim_period * bcn_interval >= MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
/*
* Increase the max roaming threshold from 2 secs to 8 secs;
* the real roam threshold is MIN(max_roam_threshold, bcn_timeout/2)
*/
lpas = 1;
dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), 1);
bcn_to_dly = 1;
/*
* If bcn_to_dly is 1, the real roam threshold is MIN(max_roam_threshold, bcn_timeout - 1);
* a link-down event is notified after the roaming procedure completes if bcn_timeout
* is hit while roaming is still in progress.
*/
dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly, sizeof(bcn_to_dly), 1);
/* Increase beacon timeout to 6 secs */
bcn_timeout = (bcn_timeout < BCN_TIMEOUT_IN_SUSPEND) ?
BCN_TIMEOUT_IN_SUSPEND : bcn_timeout;
dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), 1);
}
#ifndef ENABLE_FW_ROAM_SUSPEND
/* Disable firmware roaming during suspend */
bcm_mkiovar("roam_off", (char *)&roamvar, 4,
iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
if (FW_SUPPORTED(dhd, ndoe)) {
/* enable IPv6 RA filter in firmware during suspend */
nd_ra_filter = 1;
bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
sizeof(iovbuf), TRUE, 0)) < 0)
DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
ret));
}
dhd_os_suppress_logging(dhd, TRUE);
} else {
#ifdef PKT_FILTER_SUPPORT
dhd->early_suspended = 0;
#endif
/* Kernel resumed */
DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
#ifdef CUSTOM_SET_SHORT_DWELL_TIME
dhd_set_short_dwell_time(dhd, FALSE);
#endif
#ifndef SUPPORT_PM2_ONLY
power_mode = PM_FAST;
dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#ifdef PKT_FILTER_SUPPORT
/* disable pkt filter */
dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */
/* restore pre-suspend setting */
dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, sizeof(bcn_li_dtim), 1);
dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), 1);
dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly, sizeof(bcn_to_dly), 1);
dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), 1);
#ifndef ENABLE_FW_ROAM_SUSPEND
roamvar = dhd_roam_disable;
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
if (FW_SUPPORTED(dhd, ndoe)) {
/* disable IPv6 RA filter in firmware during suspend */
nd_ra_filter = 0;
bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
sizeof(iovbuf), TRUE, 0)) < 0)
DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
ret));
}
dhd_os_suppress_logging(dhd, FALSE);
}
}
dhd_suspend_unlock(dhd);
return 0;
}
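/* Record the early-suspend state in dhdp->in_suspend and, for STA mode (or when
* forced), apply it via dhd_set_suspend() under the wake lock and perimeter lock.
*/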
static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
{
dhd_pub_t *dhdp = &dhd->pub;
int ret = 0;
DHD_OS_WAKE_LOCK(dhdp);
DHD_PERIM_LOCK(dhdp);
/* Set flag when early suspend was called */
dhdp->in_suspend = val;
if ((force || !dhdp->suspend_disable_flag) &&
dhd_support_sta_mode(dhdp))
{
ret = dhd_set_suspend(val, dhdp);
}
DHD_PERIM_UNLOCK(dhdp);
DHD_OS_WAKE_UNLOCK(dhdp);
return ret;
}
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
static void dhd_early_suspend(struct early_suspend *h)
{
struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
if (dhd)
dhd_suspend_resume_helper(dhd, 1, 0);
}
static void dhd_late_resume(struct early_suspend *h)
{
struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
if (dhd)
dhd_suspend_resume_helper(dhd, 0, 0);
}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
/*
* Generalized timeout mechanism. Uses spin sleep with exponential back-off until
* the sleep time reaches one jiffy, then switches over to task delay. Usage:
*
* dhd_timeout_start(&tmo, usec);
* while (!dhd_timeout_expired(&tmo))
* if (poll_something())
* break;
* if (dhd_timeout_expired(&tmo))
* fatal();
*/
#ifdef CONFIG_PARTIALRESUME
static unsigned int dhd_get_ipv6_stat(u8 type)
{
static unsigned int ra = 0;
static unsigned int na = 0;
static unsigned int other = 0;
switch (type) {
case NDISC_ROUTER_ADVERTISEMENT:
ra++;
return ra;
case NDISC_NEIGHBOUR_ADVERTISEMENT:
na++;
return na;
default:
other++;
break;
}
return other;
}
#endif
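/* Partial-resume helper: for IPv6 neighbour advertisements multicast to the
* all-nodes link-local address, keep the host suspended (return 0); for anything
* else return DHD_PACKET_TIMEOUT_MS.
*/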
static int dhd_rx_suspend_again(struct sk_buff *skb)
{
#ifdef CONFIG_PARTIALRESUME
u8 *pptr = skb_mac_header(skb);
if (pptr &&
(memcmp(pptr, "\x33\x33\x00\x00\x00\x01", ETHER_ADDR_LEN) == 0) &&
(ntoh16(skb->protocol) == ETHER_TYPE_IPV6)) {
u8 type = 0;
#define ETHER_ICMP6_TYPE 54
#define ETHER_ICMP6_DADDR 38
if (skb->len > ETHER_ICMP6_TYPE)
type = pptr[ETHER_ICMP6_TYPE];
if ((type == NDISC_NEIGHBOUR_ADVERTISEMENT) &&
(ipv6_addr_equal(&in6addr_linklocal_allnodes,
(const struct in6_addr *)(pptr + ETHER_ICMP6_DADDR)))) {
pr_debug("%s: Suspend, type = %d [%u]\n", __func__,
type, dhd_get_ipv6_stat(type));
return 0;
} else {
pr_debug("%s: Resume, type = %d [%u]\n", __func__,
type, dhd_get_ipv6_stat(type));
}
#undef ETHER_ICMP6_TYPE
#undef ETHER_ICMP6_DADDR
}
#endif
return DHD_PACKET_TIMEOUT_MS;
}
void
dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
{
tmo->limit = usec;
tmo->increment = 0;
tmo->elapsed = 0;
tmo->tick = jiffies_to_usecs(1);
}
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
/* Do nothing on the first call */
if (tmo->increment == 0) {
tmo->increment = 1;
return 0;
}
if (tmo->elapsed >= tmo->limit)
return 1;
/* Add the delay that's about to take place */
tmo->elapsed += tmo->increment;
if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
OSL_DELAY(tmo->increment);
tmo->increment *= 2;
if (tmo->increment > tmo->tick)
tmo->increment = tmo->tick;
} else {
wait_queue_head_t delay_wait;
DECLARE_WAITQUEUE(wait, current);
init_waitqueue_head(&delay_wait);
add_wait_queue(&delay_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
(void)schedule_timeout(1);
remove_wait_queue(&delay_wait, &wait);
set_current_state(TASK_RUNNING);
}
return 0;
}
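/* Map a net_device back to its index in dhd->iflist[]; returns DHD_BAD_IF if the
* device is not owned by this dhd instance.
*/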
int
dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
{
int i = 0;
ASSERT(dhd);
while (i < DHD_MAX_IFS) {
if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
return i;
i++;
}
return DHD_BAD_IF;
}
struct net_device * dhd_idx2net(void *pub, int ifidx)
{
struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
struct dhd_info *dhd_info;
if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
return NULL;
dhd_info = dhd_pub->info;
if (dhd_info && dhd_info->iflist[ifidx])
return dhd_info->iflist[ifidx]->net;
return NULL;
}
int
dhd_ifname2idx(dhd_info_t *dhd, char *name)
{
int i = DHD_MAX_IFS;
ASSERT(dhd);
if (name == NULL || *name == '\0')
return 0;
while (--i > 0)
if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
break;
DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
return i; /* default - the primary interface */
}
int
dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
{
int i = DHD_MAX_IFS;
ASSERT(dhd);
if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
DHD_TRACE(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
return 0; /* default - the primary interface */
}
while (--i > 0)
if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
break;
DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
return i; /* default - the primary interface */
}
char *
dhd_ifname(dhd_pub_t *dhdp, int ifidx)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
ASSERT(dhd);
if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
return "<if_bad>";
}
if (dhd->iflist[ifidx] == NULL) {
DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
return "<if_null>";
}
if (dhd->iflist[ifidx]->net)
return dhd->iflist[ifidx]->net->name;
return "<if_none>";
}
uint8 *
dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
{
int i;
dhd_info_t *dhd = (dhd_info_t *)dhdp;
ASSERT(dhd);
for (i = 0; i < DHD_MAX_IFS; i++)
if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
return dhd->iflist[i]->mac_addr;
return NULL;
}
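/* Push the interface's multicast configuration to the dongle: send the address
* list via the "mcast_list" iovar, then program "allmulti" and WLC_SET_PROMISC
* to mirror the net_device IFF_ALLMULTI/IFF_PROMISC flags.
*/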
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
struct netdev_hw_addr *ha;
#else
struct dev_mc_list *mclist;
#endif
uint32 allmulti, cnt;
wl_ioctl_t ioc;
char *buf, *bufp;
uint buflen;
int ret;
ASSERT(dhd && dhd->iflist[ifidx]);
dev = dhd->iflist[ifidx]->net;
if (!dev)
return;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
cnt = netdev_mc_count(dev);
#else
cnt = dev->mc_count;
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
#endif
/* Determine initial value of allmulti flag */
allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
/* Send down the multicast list first. */
buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
dhd_ifname(&dhd->pub, ifidx), cnt));
return;
}
strncpy(bufp, "mcast_list", buflen - 1);
bufp[buflen - 1] = '\0';
bufp += strlen("mcast_list") + 1;
cnt = htol32(cnt);
memcpy(bufp, &cnt, sizeof(cnt));
bufp += sizeof(cnt);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
netdev_for_each_mc_addr(ha, dev) {
if (!cnt)
break;
memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
bufp += ETHER_ADDR_LEN;
cnt--;
}
#else
for (mclist = dev->mc_list; (mclist && (cnt > 0));
cnt--, mclist = mclist->next) {
memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
bufp += ETHER_ADDR_LEN;
}
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
#endif
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_VAR;
ioc.buf = buf;
ioc.len = buflen;
ioc.set = TRUE;
ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
dhd_ifname(&dhd->pub, ifidx), cnt));
allmulti = cnt ? TRUE : allmulti;
}
MFREE(dhd->pub.osh, buf, buflen);
/* Now send the allmulti setting. This is based on the setting in the
* net_device flags, but may have been forced on above if we tried to set
* some addresses and the dongle rejected them.
*/
buflen = sizeof("allmulti") + sizeof(allmulti);
if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
return;
}
allmulti = htol32(allmulti);
if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
MFREE(dhd->pub.osh, buf, buflen);
return;
}
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_VAR;
ioc.buf = buf;
ioc.len = buflen;
ioc.set = TRUE;
ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
DHD_ERROR(("%s: set allmulti %d failed\n",
dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
}
MFREE(dhd->pub.osh, buf, buflen);
/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
allmulti = htol32(allmulti);
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_PROMISC;
ioc.buf = &allmulti;
ioc.len = sizeof(allmulti);
ioc.set = TRUE;
ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
DHD_ERROR(("%s: set promisc %d failed\n",
dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
}
}
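/* Program the dongle MAC address through the "cur_etheraddr" iovar and, on
* success, mirror it into the net_device and (for ifidx 0) dhd->pub.mac.
*/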
int
_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
{
char buf[32];
wl_ioctl_t ioc;
int ret;
if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
return -1;
}
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_VAR;
ioc.buf = buf;
ioc.len = 32;
ioc.set = TRUE;
ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
} else {
memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
if (ifidx == 0)
memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
}
return ret;
}
#ifdef SOFTAP
extern struct net_device *ap_net_dev;
extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
#endif
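/* Deferred-work handler for interface-add events: allocates and registers the new
* net_device (attaching a wireless_dev on >= 3.11 kernels) and, on PCIe full-dongle
* builds, enables ap_isolate for non-STA roles.
*/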
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
dhd_if_event_t *if_event = event_info;
struct net_device *ndev;
int ifidx, bssidx;
int ret;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
struct wireless_dev *vwdev, *primary_wdev;
struct net_device *primary_ndev;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
if (event != DHD_WQ_WORK_IF_ADD) {
DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
return;
}
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
if (!if_event) {
DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
return;
}
dhd_net_if_lock_local(dhd);
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK(&dhd->pub);
ifidx = if_event->event.ifidx;
bssidx = if_event->event.bssidx;
DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
if_event->mac, bssidx, TRUE);
if (!ndev) {
DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
goto done;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
if (unlikely(!vwdev)) {
WL_ERR(("Could not allocate wireless device\n"));
goto done;
}
primary_ndev = dhd->pub.info->iflist[0]->net;
primary_wdev = ndev_to_wdev(primary_ndev);
vwdev->wiphy = primary_wdev->wiphy;
vwdev->iftype = if_event->event.role;
vwdev->netdev = ndev;
ndev->ieee80211_ptr = vwdev;
SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
DHD_PERIM_UNLOCK(&dhd->pub);
ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
DHD_PERIM_LOCK(&dhd->pub);
if (ret != BCME_OK) {
DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
dhd_remove_if(&dhd->pub, ifidx, TRUE);
}
#ifdef PCIE_FULL_DONGLE
/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
char iovbuf[WLC_IOCTL_SMLEN];
uint32 var_int = 1;
memset(iovbuf, 0, sizeof(iovbuf));
bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
}
#endif /* PCIE_FULL_DONGLE */
done:
MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
dhd_net_if_unlock_local(dhd);
}
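/* Deferred-work handler for interface-delete events: tears down the net_device for
* the reported ifidx and frees the queued event.
*/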
static void
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
int ifidx;
dhd_if_event_t *if_event = event_info;
if (event != DHD_WQ_WORK_IF_DEL) {
DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
return;
}
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
if (!if_event) {
DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
return;
}
dhd_net_if_lock_local(dhd);
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK(&dhd->pub);
ifidx = if_event->event.ifidx;
DHD_TRACE(("Removing interface with idx %d\n", ifidx));
dhd_remove_if(&dhd->pub, ifidx, TRUE);
MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
dhd_net_if_unlock_local(dhd);
}
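/* Deferred-work handler for MAC address changes: applies the address cached by
* dhd_set_mac_address() via _dhd_set_mac_address() (blocked while SoftAP is active).
*/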
static void
dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
dhd_if_t *ifp = event_info;
if (event != DHD_WQ_WORK_SET_MAC) {
DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
return;
}
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
dhd_net_if_lock_local(dhd);
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK(&dhd->pub);
#ifdef SOFTAP
{
unsigned long flags;
bool in_ap = FALSE;
DHD_GENERAL_LOCK(&dhd->pub, flags);
in_ap = (ap_net_dev != NULL);
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
if (in_ap) {
DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
ifp->net->name));
goto done;
}
}
#endif /* SOFTAP */
if (ifp == NULL || !dhd->pub.up) {
DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
goto done;
}
DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
ifp->set_macaddress = FALSE;
if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
else
DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
done:
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
dhd_net_if_unlock_local(dhd);
}
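/* Deferred-work handler for DHD_WQ_WORK_SET_MCAST_LIST: applies the
* multicast filter list for the interface flagged by
* dhd_set_multicast_list(). On SOFTAP builds the request is dropped while
* an AP net device is active.
*/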
static void
dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
dhd_if_t *ifp = event_info;
int ifidx;
if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
return;
}
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
dhd_net_if_lock_local(dhd);
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK(&dhd->pub);
#ifdef SOFTAP
{
bool in_ap = FALSE;
unsigned long flags;
DHD_GENERAL_LOCK(&dhd->pub, flags);
in_ap = (ap_net_dev != NULL);
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
if (in_ap) {
DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
ifp->net->name));
ifp->set_multicast = FALSE;
goto done;
}
}
#endif /* SOFTAP */
if (ifp == NULL || !dhd->pub.up) {
DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
goto done;
}
ifidx = ifp->idx;
_dhd_set_multicast_list(dhd, ifidx);
DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
done:
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
dhd_net_if_unlock_local(dhd);
}
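/* Network-device MAC-address change entry point: cache the requested
* address in the per-interface structure and defer the actual firmware
* update to the low-priority deferred work queue.
*/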
static int
dhd_set_mac_address(struct net_device *dev, void *addr)
{
int ret = 0;
dhd_info_t *dhd = DHD_DEV_INFO(dev);
struct sockaddr *sa = (struct sockaddr *)addr;
int ifidx;
dhd_if_t *dhdif;
ifidx = dhd_net2idx(dhd, dev);
if (ifidx == DHD_BAD_IF)
return -1;
dhdif = dhd->iflist[ifidx];
dhd_net_if_lock_local(dhd);
memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
dhdif->set_macaddress = TRUE;
dhd_net_if_unlock_local(dhd);
dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
return ret;
}
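/* Network-device multicast list callback: flag the interface and defer
* rebuilding the firmware multicast filter to the low-priority deferred
* work queue.
*/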
static void
dhd_set_multicast_list(struct net_device *dev)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ifidx;
ifidx = dhd_net2idx(dhd, dev);
if (ifidx == DHD_BAD_IF)
return;
dhd->iflist[ifidx]->set_multicast = TRUE;
dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
}
#ifdef PROP_TXSTATUS
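/* Serialize access to the wlfc (proptxstatus) state with a BH-safe
* spinlock; callers pair dhd_os_wlfc_block() with dhd_os_wlfc_unblock().
*/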
int
dhd_os_wlfc_block(dhd_pub_t *pub)
{
dhd_info_t *di = (dhd_info_t *)(pub->info);
ASSERT(di != NULL);
spin_lock_bh(&di->wlfc_spinlock);
return 1;
}
int
dhd_os_wlfc_unblock(dhd_pub_t *pub)
{
dhd_info_t *di = (dhd_info_t *)(pub->info);
ASSERT(di != NULL);
spin_unlock_bh(&di->wlfc_spinlock);
return 1;
}
#endif /* PROP_TXSTATUS */
#if defined(DHD_8021X_DUMP)
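/* Log the version, type and replay-counter bytes of outgoing EAPOL
* (802.1X) frames when DHD_8021X_DUMP debugging is enabled.
*/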
void
dhd_tx_dump(osl_t *osh, void *pkt)
{
uint8 *dump_data;
uint16 protocol;
dump_data = PKTDATA(osh, pkt);
protocol = (dump_data[12] << 8) | dump_data[13];
if (protocol == ETHER_TYPE_802_1X) {
DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
dump_data[14], dump_data[15], dump_data[30]));
}
}
#endif /* DHD_8021X_DUMP */
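/* Core transmit routine shared by the netdev xmit path and internal
* senders (e.g. intra-BSS forwarding in dhd_rx_frame). Performs the
* down/suspend checks, multicast and 802.1X accounting, optional DHCP
* unicast conversion and debug dumps, priority tagging, flowring lookup
* on PCIe full-dongle builds, protocol header push, and finally hands the
* packet to the bus, either directly or through proptxstatus. The packet
* is consumed here: it is freed on the error paths shown below or passed
* on to the bus layer.
*/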
int BCMFASTPATH
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
int ret = BCME_OK;
dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
struct ether_header *eh = NULL;
/* Reject if down */
if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
/* free the packet here since the caller won't */
PKTFREE(dhdp->osh, pktbuf, TRUE);
return -ENODEV;
}
#ifdef PCIE_FULL_DONGLE
if (dhdp->busstate == DHD_BUS_SUSPEND) {
DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
PKTFREE(dhdp->osh, pktbuf, TRUE);
return -EBUSY;
}
#endif /* PCIE_FULL_DONGLE */
#ifdef DHD_UNICAST_DHCP
/* If dhcp_unicast is enabled, convert broadcast DHCP ACK/REPLY packets to unicast. */
if (dhdp->dhcp_unicast) {
dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
}
#endif /* DHD_UNICAST_DHCP */
/* Update multicast statistic */
if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
eh = (struct ether_header *)pktdata;
if (ETHER_ISMULTI(eh->ether_dhost))
dhdp->tx_multicast++;
if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
atomic_inc(&dhd->pend_8021x_cnt);
}
#ifdef DHD_DHCP_DUMP
if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
uint16 dump_hex;
uint16 source_port;
uint16 dest_port;
uint16 udp_port_pos;
uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
udp_port_pos = ETHER_HDR_LEN + ip_header_len;
source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
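/* The two bytes read at udp_port_pos + 249/250 are the length and value
* of the first DHCP option (8-byte UDP header + 236-byte BOOTP fixed
* fields + 4-byte magic cookie), so dump_hex is 0x01XX when option 53
* (message type, length 1) is the first option, which is the common case;
* other option orderings are logged with the raw hex value below.
*/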
if (source_port == 0x0044 || dest_port == 0x0044) {
dump_hex = (pktdata[udp_port_pos+249] << 8) |
pktdata[udp_port_pos+250];
if (dump_hex == 0x0101) {
DHD_ERROR(("DHCP - DISCOVER [TX]\n"));
} else if (dump_hex == 0x0102) {
DHD_ERROR(("DHCP - OFFER [TX]\n"));
} else if (dump_hex == 0x0103) {
DHD_ERROR(("DHCP - REQUEST [TX]\n"));
} else if (dump_hex == 0x0105) {
DHD_ERROR(("DHCP - ACK [TX]\n"));
} else {
DHD_ERROR(("DHCP - 0x%X [TX]\n", dump_hex));
}
} else if (source_port == 0x0043 || dest_port == 0x0043) {
DHD_ERROR(("DHCP - BOOTP [RX]\n"));
}
}
#endif /* DHD_DHCP_DUMP */
} else {
PKTFREE(dhd->pub.osh, pktbuf, TRUE);
return BCME_ERROR;
}
#ifdef DHDTCPACK_SUPPRESS
/* If this packet has replaced another packet and got freed, just return */
if (dhd_tcpack_suppress(dhdp, pktbuf))
return ret;
#endif /* DHDTCPACK_SUPPRESS */
/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
if (PKTPRIO(pktbuf) == 0)
#endif
pktsetprio(pktbuf, FALSE);
#ifdef PCIE_FULL_DONGLE
/*
* Look up the per-interface hash table for a matching flowring. If one is not
* available, allocate a unique flowid and add a flowring entry.
* The found or newly created flowid is placed into the pktbuf's tag.
*/
ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
if (ret != BCME_OK) {
PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
return ret;
}
#endif
#ifdef PROP_TXSTATUS
if (dhd_wlfc_is_supported(dhdp)) {
/* store the interface ID */
DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
/* store destination MAC in the tag as well */
DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
/* decide which FIFO this packet belongs to */
if (ETHER_ISMULTI(eh->ether_dhost))
/* one additional queue index (highest AC + 1) is used for bc/mc queue */
DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
else
DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
} else
#endif /* PROP_TXSTATUS */
/* If the protocol uses a data header, apply it */
dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
/* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
dhd_htsf_addtxts(dhdp, pktbuf);
#endif
#if defined(DHD_8021X_DUMP)
dhd_tx_dump(dhdp->osh, pktbuf);
#endif
#ifdef PROP_TXSTATUS
{
if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
/* non-proptxstatus way */
#ifdef BCMPCIE
ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
}
}
#else
#ifdef BCMPCIE
ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */
return ret;
}
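/* ndo_start_xmit hook: validates the bus and interface state, re-aligns
* the skb and re-allocates headroom if needed, converts it to a native
* packet, applies optional WMF multicast handling, and passes it to
* dhd_sendpkt(). The skb is consumed on all paths except the bus-down /
* bad-interface rejections, which return NETDEV_TX_BUSY so the stack
* requeues the packet.
*/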
int BCMFASTPATH
dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
int ret;
uint datalen;
void *pktbuf;
dhd_info_t *dhd = DHD_DEV_INFO(net);
dhd_if_t *ifp = NULL;
int ifidx;
#ifdef WLMEDIA_HTSF
uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
#else
uint8 htsfdlystat_sz = 0;
#endif
#ifdef DHD_WMF
struct ether_header *eh;
uint8 *iph;
#endif /* DHD_WMF */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
/* Reject if down */
if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
netif_stop_queue(net);
/* Send a HANG event up when bus-down is detected during an active data session */
if (dhd->pub.up) {
DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
net_os_send_hang_message(net);
}
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
return -ENODEV;
#else
return NETDEV_TX_BUSY;
#endif
}
ifp = DHD_DEV_IFP(net);
ifidx = DHD_DEV_IFIDX(net);
ASSERT(ifidx == dhd_net2idx(dhd, net));
ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
if (ifidx == DHD_BAD_IF) {
DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
netif_stop_queue(net);
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
return -ENODEV;
#else
return NETDEV_TX_BUSY;
#endif
}
/* re-align socket buffer if "skb->data" is odd address */
if (((unsigned long)(skb->data)) & 0x1) {
unsigned char *data = skb->data;
uint32 length = skb->len;
PKTPUSH(dhd->pub.osh, skb, 1);
memmove(skb->data, data, length);
PKTSETLEN(dhd->pub.osh, skb, length);
}
datalen = PKTLEN(dhd->pub.osh, skb);
/* Make sure there's enough room for any header */
if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
struct sk_buff *skb2;
DHD_INFO(("%s: insufficient headroom\n",
dhd_ifname(&dhd->pub, ifidx)));
dhd->pub.tx_realloc++;
skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
dev_kfree_skb(skb);
if ((skb = skb2) == NULL) {
DHD_ERROR(("%s: skb_realloc_headroom failed\n",
dhd_ifname(&dhd->pub, ifidx)));
ret = -ENOMEM;
goto done;
}
}
/* Convert to packet */
if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
dhd_ifname(&dhd->pub, ifidx)));
dev_kfree_skb_any(skb);
ret = -ENOMEM;
goto done;
}
#ifdef WLMEDIA_HTSF
if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
struct ether_header *eh = (struct ether_header *)pktdata;
if (!ETHER_ISMULTI(eh->ether_dhost) &&
(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
}
}
#endif
#ifdef DHD_WMF
eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
iph = (uint8 *)eh + ETHER_HDR_LEN;
/* WMF processing for multicast packets
* Only IPv4 packets are handled
*/
if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
(IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
void *sdu_clone;
bool ucast_convert = FALSE;
#ifdef DHD_UCAST_UPNP
uint32 dest_ip;
dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
#endif /* DHD_UCAST_UPNP */
#ifdef DHD_IGMP_UCQUERY
ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
(IPV4_PROT(iph) == IP_PROT_IGMP) &&
(*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
#endif /* DHD_IGMP_UCQUERY */
if (ucast_convert) {
dhd_sta_t *sta;
unsigned long flags;
DHD_IF_STA_LIST_LOCK(ifp, flags);
/* Convert upnp/igmp query to unicast for each assoc STA */
list_for_each_entry(sta, &ifp->sta_list, list) {
if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return (WMF_NOP);
}
dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
}
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
PKTFREE(dhd->pub.osh, pktbuf, TRUE);
return NETDEV_TX_OK;
} else
#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
{
/* No STA info is available when the packet comes from a LAN host,
* so pass NULL.
*/
ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
switch (ret) {
case WMF_TAKEN:
case WMF_DROP:
/* Either taken by WMF or we should drop it.
* Exiting send path
*/
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return NETDEV_TX_OK;
default:
/* Continue the transmit path */
break;
}
}
}
#endif /* DHD_WMF */
ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
done:
if (ret) {
ifp->stats.tx_dropped++;
dhd->pub.tx_dropped++;
}
else {
dhd->pub.tx_packets++;
ifp->stats.tx_packets++;
ifp->stats.tx_bytes += datalen;
}
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Return ok: we always eat the packet */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
return 0;
#else
return NETDEV_TX_OK;
#endif
}
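/* Start or stop the kernel transmit queue(s) in response to bus flow
* control: ifidx selects a single interface, or ALL_INTERFACES applies
* the new state to every active one.
*/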
void
dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
{
struct net_device *net;
dhd_info_t *dhd = dhdp->info;
int i;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
ASSERT(dhd);
if (ifidx == ALL_INTERFACES) {
/* Flow control on all active interfaces */
dhdp->txoff = state;
for (i = 0; i < DHD_MAX_IFS; i++) {
if (dhd->iflist[i]) {
net = dhd->iflist[i]->net;
if (state == ON)
netif_stop_queue(net);
else
netif_wake_queue(net);
}
}
}
else {
if (dhd->iflist[ifidx]) {
net = dhd->iflist[ifidx]->net;
if (state == ON)
netif_stop_queue(net);
else
netif_wake_queue(net);
}
}
}
#ifdef DHD_RX_DUMP
typedef struct {
uint16 type;
const char *str;
} PKTTYPE_INFO;
static const PKTTYPE_INFO packet_type_info[] =
{
{ ETHER_TYPE_IP, "IP" },
{ ETHER_TYPE_ARP, "ARP" },
{ ETHER_TYPE_BRCM, "BRCM" },
{ ETHER_TYPE_802_1X, "802.1X" },
{ ETHER_TYPE_WAI, "WAPI" },
{ 0, ""}
};
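/* Map an Ethertype to a printable name for RX dump logging; unknown types
* fall through to the empty-string sentinel entry.
*/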
static const char *_get_packet_type_str(uint16 type)
{
int i;
int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;
for (i = 0; i < n; i++) {
if (packet_type_info[i].type == type)
return packet_type_info[i].str;
}
return packet_type_info[n].str;
}
#endif /* DHD_RX_DUMP */
#ifdef DHD_WMF
bool
dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
{
dhd_info_t *dhd = dhdp->info;
return dhd->rxthread_enabled;
}
#endif /* DHD_WMF */
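/* Receive path: walks the chain of up to numpkt packets from the bus,
* drops data frames for unregistered interfaces, applies optional L2
* filtering, WMF and TCP-ACK bookkeeping, performs intra-BSS forwarding
* on PCIe full-dongle AP/P2P-GO interfaces, emits debug dumps, then
* either hands BRCM event packets to dhd_wl_host_event() or, further
* down, delivers data frames up the network stack.
*/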
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan,
int pkt_wake, wake_counts_t *wcp)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
struct sk_buff *skb;
uchar *eth;
uint len;
void *data, *pnext = NULL;
int i;
dhd_if_t *ifp;
wl_event_msg_t event;
int tout_rx = 0;
int tout_ctrl = 0;
void *skbhead = NULL;
void *skbprev = NULL;
uint16 protocol;
#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_WAKE_STATUS)
char *dump_data;
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_WAKE_STATUS */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
struct ether_header *eh;
pnext = PKTNEXT(dhdp->osh, pktbuf);
PKTSETNEXT(dhdp->osh, pktbuf, NULL);
ifp = dhd->iflist[ifidx];
if (ifp == NULL) {
DHD_ERROR(("%s: ifp is NULL. drop packet\n",
__FUNCTION__));
PKTCFREE(dhdp->osh, pktbuf, FALSE);
continue;
}
eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
/* Drop data packets until the net device is registered, to avoid a kernel panic;
* BRCM event packets are still allowed through.
*/
#ifndef PROP_TXSTATUS_VSDB
if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
#else
if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
#endif /* PROP_TXSTATUS_VSDB */
DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
__FUNCTION__));
PKTCFREE(dhdp->osh, pktbuf, FALSE);
continue;
}
#ifdef PROP_TXSTATUS
if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
/* WLFC may send a header-only packet when there is
an urgent message but no data packet to piggy-back
it on.
*/
PKTCFREE(dhdp->osh, pktbuf, FALSE);
continue;
}
#endif
#ifdef DHD_L2_FILTER
/* If block_ping is enabled drop the ping packet */
if (dhdp->block_ping) {
if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
PKTFREE(dhdp->osh, pktbuf, FALSE);
continue;
}
}
#endif
#ifdef DHD_WMF
/* WMF processing for multicast packets */
if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
dhd_sta_t *sta;
int ret;
sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
switch (ret) {
case WMF_TAKEN:
/* The packet is taken by WMF. Continue to next iteration */
continue;
case WMF_DROP:
/* Packet DROP decision by WMF. Toss it */
DHD_ERROR(("%s: WMF decides to drop packet\n",
__FUNCTION__));
PKTCFREE(dhdp->osh, pktbuf, FALSE);
continue;
default:
/* Continue the transmit path */
break;
}
}
#endif /* DHD_WMF */
#ifdef DHDTCPACK_SUPPRESS
dhd_tcpdata_info_get(dhdp, pktbuf);
#endif
skb = PKTTONATIVE(dhdp->osh, pktbuf);
ifp = dhd->iflist[ifidx];
if (ifp == NULL)
ifp = dhd->iflist[0];
ASSERT(ifp);
skb->dev = ifp->net;
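/* Intra-BSS forwarding for AP / P2P-GO roles when ap_isolate is off:
* unicast frames destined to an associated STA are looped straight back
* out through dhd_sendpkt(), while multicast frames are duplicated so one
* copy is re-transmitted into the BSS and the original still goes up the
* stack.
*/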
#ifdef PCIE_FULL_DONGLE
if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
(!ifp->ap_isolate)) {
eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
if (ETHER_ISUCAST(eh->ether_dhost)) {
if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
dhd_sendpkt(dhdp, ifidx, pktbuf);
continue;
}
} else {
void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
if (npktbuf != NULL)
dhd_sendpkt(dhdp, ifidx, npktbuf);
}
}
#endif /* PCIE_FULL_DONGLE */
/* Save the packet pointer and length across eth_type_trans().
* This hack works around a Linux 2.4 limitation: eth_type_trans()
* does its skb_pull() using 'net->hard_header_len' rather than
* ETH_HLEN. To avoid copying packets coming from the network stack
* when prepending the BDC and hardware headers, the interface is
* registered with 'net->hard_header_len' set to ETH_HLEN plus that
* extra header space, so the data pointer and length are restored
* below and ETH_HLEN is pulled explicitly.
*/
eth = skb->data;
len = skb->len;
protocol = (skb->data[12] << 8) | skb->data[13];
if (protocol == ETHER_TYPE_802_1X) {
DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
}
#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) \
|| defined(DHD_WAKE_STATUS)
dump_data = skb->data;
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_WAKE_STATUS */
#ifdef DHD_8021X_DUMP
if (protocol == ETHER_TYPE_802_1X) {
DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
"ver %d, type %d, replay %d\n",
dump_data[14], dump_data[15],
dump_data[30]));
}
#endif /* DHD_8021X_DUMP */
#ifdef DHD_DHCP_DUMP
if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
uint16 dump_hex;
uint16 source_port;
uint16 dest_port;
uint16 udp_port_pos;
uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
udp_port_pos = ETHER_HDR_LEN + ip_header_len;
source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
if (source_port == 0x0044 || dest_port == 0x0044) {
dump_hex = (dump_data[udp_port_pos+249] << 8) |
dump_data[udp_port_pos+250];
if (dump_hex == 0x0101) {
DHD_ERROR(("DHCP - DISCOVER [RX]\n"));
} else if (dump_hex == 0x0102) {
DHD_ERROR(("DHCP - OFFER [RX]\n"));
} else if (dump_hex == 0x0103) {
DHD_ERROR(("DHCP - REQUEST [RX]\n"));
} else if (dump_hex == 0x0105) {
DHD_ERROR(("DHCP - ACK [RX]\n"));
} else {
DHD_ERROR(("DHCP - 0x%X [RX]\n", dump_hex));
}
} else if (source_port == 0x0043 || dest_port == 0x0043) {
DHD_ERROR(("DHCP - BOOTP [RX]\n"));
}
}
#endif /* DHD_DHCP_DUMP */
#if defined(DHD_RX_DUMP)
DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
if (protocol != ETHER_TYPE_BRCM) {
if (dump_data[0] == 0xFF) {
DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
if ((dump_data[12] == 8) &&
(dump_data[13] == 6)) {
DHD_ERROR(("%s: ARP %d\n",
__FUNCTION__, dump_data[0x15]));
}
} else if (dump_data[0] & 1) {
DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
__FUNCTION__, MAC2STRDBG(dump_data)));
}
#ifdef DHD_RX_FULL_DUMP
{
int k;
for (k = 0; k < skb->len; k++) {
DHD_ERROR(("%02X ", dump_data[k]));
if ((k & 15) == 15)
DHD_ERROR(("\n"));
}
DHD_ERROR(("\n"));
}
#endif /* DHD_RX_FULL_DUMP */
}
#endif /* DHD_RX_DUMP */
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST) {
dhd->pub.rx_multicast++;
ifp->stats.multicast++;
}
skb->data = eth;
skb->len = len;
#ifdef WLMEDIA_HTSF
dhd_htsf_addrxts(dhdp, pktbuf);
#endif
/* Strip header, count, deliver upward */
skb_pull(skb, ETH_HLEN);
/* Process special event packets and then discard them */
memset(&event, 0, sizeof(event));
if ((ntoh16(skb->protocol) == ETHER_TYPE_BRCM) &&
(len >= sizeof(bcm_event_t))) {
dhd_wl_host_event(dhd, &ifidx,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
skb_mac_header(skb),
#else
skb->mac.raw,
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
len - 2,
&event,
&data);
wl_event_to_host_order(&event);
if (!tout_ctrl)
tout_ctrl = DHD_PACKET_TIMEOUT_MS;
#if defined(PNO_SUPPORT)
if (event.event_type == WLC_E_PFN_NET_FOUND) {
/* enforce a custom wake lock timeout to guarantee that the kernel is not suspended */
tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
}
#endif /* PNO_SUPPORT */
#ifdef DHD_WAKE_STATUS
if (unlikely(pkt_wake)) {
wcp->rcwake++;
#ifdef DHD_WAKE_EVENT_STATUS
if (event.event_type < WLC_E_LAST)
wcp->rc_event[event.event_type]++;
#endif
pkt_wake = 0;
}
#endif
#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
PKTFREE(dhdp->osh, pktbuf, FALSE);
continue;
#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */