| /* |
| * Broadcom Dongle Host Driver (DHD), Linux-specific network interface |
| * Basically selected code segments from usb-cdc.c and usb-rndis.c |
| * |
| * Copyright (C) 1999-2017, Broadcom Corporation |
| * |
| * Unless you and Broadcom execute a separate written software license |
| * agreement governing use of this software, this software is licensed to you |
| * under the terms of the GNU General Public License version 2 (the "GPL"), |
| * available at http://www.broadcom.com/licenses/GPLv2.php, with the |
| * following added to such license: |
| * |
| * As a special exception, the copyright holders of this software give you |
| * permission to link this software with independent modules, and to copy and |
| * distribute the resulting executable under terms of your choice, provided that |
| * you also meet, for each linked independent module, the terms and conditions of |
| * the license of that module. An independent module is a module which is not |
| * derived from this software. The special exception does not apply to any |
| * modifications of the software. |
| * |
| * Notwithstanding the above, under no circumstances may you combine this |
| * software in any way with any other Broadcom software provided under a license |
| * other than the GPL, without Broadcom's express prior written consent. |
| * |
| * |
| * <<Broadcom-WL-IPTag/Open:>> |
| * |
| * $Id: dhd_linux.c 710862 2017-07-14 07:43:59Z $ |
| */ |
| |
| #include <typedefs.h> |
| #include <linuxver.h> |
| #include <osl.h> |
| #ifdef SHOW_LOGTRACE |
| #include <linux/syscalls.h> |
| #include <event_log.h> |
| #endif /* SHOW_LOGTRACE */ |
| |
| #include <linux/init.h> |
| #include <linux/kernel.h> |
| #include <linux/slab.h> |
| #include <linux/skbuff.h> |
| #include <linux/netdevice.h> |
| #include <linux/inetdevice.h> |
| #include <linux/rtnetlink.h> |
| #include <linux/etherdevice.h> |
| #include <linux/random.h> |
| #include <linux/spinlock.h> |
| #include <linux/ethtool.h> |
| #include <linux/fcntl.h> |
| #include <linux/fs.h> |
| #include <linux/ip.h> |
| #include <linux/reboot.h> |
| #include <linux/notifier.h> |
| #include <net/addrconf.h> |
| #ifdef ENABLE_ADAPTIVE_SCHED |
| #include <linux/cpufreq.h> |
| #endif /* ENABLE_ADAPTIVE_SCHED */ |
| |
| #include <asm/uaccess.h> |
| #include <asm/unaligned.h> |
| |
| #include <epivers.h> |
| #include <bcmutils.h> |
| #include <bcmendian.h> |
| #include <bcmdevs.h> |
| |
| |
| #include <ethernet.h> |
| #include <bcmevent.h> |
| #include <vlan.h> |
| #include <802.3.h> |
| |
| #include <dngl_stats.h> |
| #include <dhd_linux_wq.h> |
| #include <dhd.h> |
| #include <dhd_linux.h> |
| #ifdef DHD_WET |
| #include <dhd_wet.h> |
| #endif /* DHD_WET */ |
| #ifdef PCIE_FULL_DONGLE |
| #include <dhd_flowring.h> |
| #endif |
| #include <dhd_bus.h> |
| #include <dhd_proto.h> |
| #include <dhd_config.h> |
| #ifdef WL_ESCAN |
| #include <wl_escan.h> |
| #endif |
| #include <dhd_dbg.h> |
| #include <dhd_debug.h> |
| #ifdef CONFIG_HAS_WAKELOCK |
| #include <linux/wakelock.h> |
| #endif |
| #ifdef WL_CFG80211 |
| #include <wl_cfg80211.h> |
| #endif |
| #ifdef PNO_SUPPORT |
| #include <dhd_pno.h> |
| #endif |
| #ifdef RTT_SUPPORT |
| #include <dhd_rtt.h> |
| #endif |
| #ifdef DHD_TIMESYNC |
| #include <dhd_timesync.h> |
| #endif /* DHD_TIMESYNC */ |
| |
| #ifdef CONFIG_COMPAT |
| #include <linux/compat.h> |
| #endif |
| |
| #if defined(CONFIG_SOC_EXYNOS8895) |
| #include <linux/exynos-pci-ctrl.h> |
| #endif /* CONFIG_SOC_EXYNOS8895 */ |
| |
| #ifdef DHD_WMF |
| #include <dhd_wmf_linux.h> |
| #endif /* DHD_WMF */ |
| |
| #ifdef DHD_L2_FILTER |
| #include <bcmicmp.h> |
| #include <bcm_l2_filter.h> |
| #include <dhd_l2_filter.h> |
| #endif /* DHD_L2_FILTER */ |
| |
| #ifdef DHD_PSTA |
| #include <dhd_psta.h> |
| #endif /* DHD_PSTA */ |
| |
| |
| #ifdef DHDTCPACK_SUPPRESS |
| #include <dhd_ip.h> |
| #endif /* DHDTCPACK_SUPPRESS */ |
| #include <dhd_daemon.h> |
| #ifdef DHD_PKT_LOGGING |
| #include <dhd_pktlog.h> |
| #endif /* DHD_PKT_LOGGING */ |
| #if defined(STAT_REPORT) |
| #include <wl_statreport.h> |
| #endif /* STAT_REPORT */ |
| #ifdef DHD_DEBUG_PAGEALLOC |
| typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len); |
| void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); |
| extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); |
| #endif /* DHD_DEBUG_PAGEALLOC */ |
| |
| |
| #if defined(DHD_LB) |
| #if !defined(PCIE_FULL_DONGLE) |
| #error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE" |
| #endif /* !PCIE_FULL_DONGLE */ |
| #endif /* DHD_LB */ |
| |
| #if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \ |
| defined(DHD_LB_STATS) |
| #if !defined(DHD_LB) |
| #error "DHD loadbalance derivatives are supported only if DHD_LB is defined" |
| #endif /* !DHD_LB */ |
| #endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */ |
| |
| #if defined(DHD_LB) |
| /* Dynamic CPU selection for load balancing */ |
| #include <linux/cpu.h> |
| #include <linux/cpumask.h> |
| #include <linux/notifier.h> |
| #include <linux/workqueue.h> |
| #include <asm/atomic.h> |
| |
| #if !defined(DHD_LB_PRIMARY_CPUS) |
| #define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */ |
| #endif |
| #if !defined(DHD_LB_SECONDARY_CPUS) |
| #define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */ |
| #endif |
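
/* Each mask bit selects a core id. For example (illustrative): 0xFE ==
 * 0b11111110 selects CPUs 1-7 and leaves out CPU0 (the boot CPU), while
 * 0x0 leaves the primary (big) set empty so only the secondary set is used.
 */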
| |
| #define HIST_BIN_SIZE 9 |
| |
| static void dhd_rx_napi_dispatcher_fn(struct work_struct * work); |
| |
| #if defined(DHD_LB_TXP) |
| static void dhd_lb_tx_handler(unsigned long data); |
| static void dhd_tx_dispatcher_work(struct work_struct * work); |
| static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp); |
| static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp); |
| |
| /* Pkttag not compatible with PROP_TXSTATUS or WLFC */ |
| typedef struct dhd_tx_lb_pkttag_fr { |
| struct net_device *net; |
| int ifidx; |
| } dhd_tx_lb_pkttag_fr_t; |
| |
| #define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp) |
| #define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net) |
| |
| #define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx) |
| #define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx) |
| #endif /* DHD_LB_TXP */ |
| #endif /* DHD_LB */ |
| |
| #ifdef HOFFLOAD_MODULES |
| #include <linux/firmware.h> |
| #endif |
| |
| #ifdef WLMEDIA_HTSF |
| #include <linux/time.h> |
| #include <htsf.h> |
| |
| #define HTSF_MINLEN 200 /* min. packet length to timestamp */ |
#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
#define TSMAX 1000 /* max no. of timing records kept */
| #define NUMBIN 34 |
| |
| static uint32 tsidx = 0; |
| static uint32 htsf_seqnum = 0; |
| uint32 tsfsync; |
| struct timeval tsync; |
| static uint32 tsport = 5010; |
| |
| typedef struct histo_ { |
| uint32 bin[NUMBIN]; |
| } histo_t; |
| |
| #if !ISPOWEROF2(DHD_SDALIGN) |
| #error DHD_SDALIGN is not a power of 2! |
| #endif |
| |
| static histo_t vi_d1, vi_d2, vi_d3, vi_d4; |
| #endif /* WLMEDIA_HTSF */ |
| |
| #ifdef WL_MONITOR |
| #include <bcmmsgbuf.h> |
| #include <bcmwifi_monitor.h> |
| #endif |
| |
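/* Host <-> dongle byte-order helpers. These are identity macros: no byte
 * swapping is performed in this configuration, so each simply returns its
 * argument.
 */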
| #define htod32(i) (i) |
| #define htod16(i) (i) |
| #define dtoh32(i) (i) |
| #define dtoh16(i) (i) |
| #define htodchanspec(i) (i) |
| #define dtohchanspec(i) (i) |
| |
| #ifdef STBLINUX |
| #ifdef quote_str |
| #undef quote_str |
| #endif /* quote_str */ |
| #ifdef to_str |
| #undef to_str |
#endif /* to_str */
| #define to_str(s) #s |
| #define quote_str(s) to_str(s) |
| |
| static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET); |
| #endif /* STBLINUX */ |
| |
| |
| |
| #if defined(SOFTAP) |
| extern bool ap_cfg_running; |
| extern bool ap_fw_loaded; |
| #endif |
| |
| #ifdef DHD_8021X_DUMP |
| extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction); |
| #endif /* DHD_8021X_DUMP */ |
| |
| #ifdef FIX_CPU_MIN_CLOCK |
| #include <linux/pm_qos.h> |
| #endif /* FIX_CPU_MIN_CLOCK */ |
| |
| #ifdef SET_RANDOM_MAC_SOFTAP |
| #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL |
| #define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11 |
| #endif |
| static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL; |
| #endif /* SET_RANDOM_MAC_SOFTAP */ |
| |
| #ifdef ENABLE_ADAPTIVE_SCHED |
| #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */ |
| #ifndef CUSTOM_CPUFREQ_THRESH |
| #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH |
| #endif /* CUSTOM_CPUFREQ_THRESH */ |
| #endif /* ENABLE_ADAPTIVE_SCHED */ |
| |
| /* enable HOSTIP cache update from the host side when an eth0:N is up */ |
| #define AOE_IP_ALIAS_SUPPORT 1 |
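/* For example (illustrative): `ifconfig eth0:1 192.168.1.2` brings up an IP
 * alias; with AOE_IP_ALIAS_SUPPORT the alias address is also pushed into the
 * dongle's ARP-offload host-IP table via aoe_update_host_ipv4_table().
 */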
| |
| #ifdef BCM_FD_AGGR |
| #include <bcm_rpc.h> |
| #include <bcm_rpc_tp.h> |
| #endif |
| #ifdef PROP_TXSTATUS |
| #include <wlfc_proto.h> |
| #include <dhd_wlfc.h> |
| #endif |
| |
| #include <wl_android.h> |
| |
| /* Maximum STA per radio */ |
| #define DHD_MAX_STA 32 |
| |
| |
| |
| const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 }; |
| const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; |
| #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]] |
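/* Worked example: WME_PRIO2AC(7) == wme_fifo2ac[prio2fifo[7]] ==
 * wme_fifo2ac[3] == 3 (AC_VO in the usual WME numbering), while
 * WME_PRIO2AC(0) == wme_fifo2ac[1] == 1.
 */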
| |
| #ifdef ARP_OFFLOAD_SUPPORT |
| void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx); |
| static int dhd_inetaddr_notifier_call(struct notifier_block *this, |
| unsigned long event, void *ptr); |
| static struct notifier_block dhd_inetaddr_notifier = { |
| .notifier_call = dhd_inetaddr_notifier_call |
| }; |
/* Guard against registering the same notifier twice; otherwise a loop is
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
| static bool dhd_inetaddr_notifier_registered = FALSE; |
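/* A minimal sketch of the guarded registration this flag enables (the actual
 * call site lives elsewhere in the driver):
 *
 *	if (!dhd_inetaddr_notifier_registered) {
 *		dhd_inetaddr_notifier_registered = TRUE;
 *		register_inetaddr_notifier(&dhd_inetaddr_notifier);
 *	}
 */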
| #endif /* ARP_OFFLOAD_SUPPORT */ |
| |
| #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) |
| int dhd_inet6addr_notifier_call(struct notifier_block *this, |
| unsigned long event, void *ptr); |
| static struct notifier_block dhd_inet6addr_notifier = { |
| .notifier_call = dhd_inet6addr_notifier_call |
| }; |
/* Guard against registering the same notifier twice; otherwise a loop is
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
| static bool dhd_inet6addr_notifier_registered = FALSE; |
| #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ |
| |
| #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) |
| #include <linux/suspend.h> |
| volatile bool dhd_mmc_suspend = FALSE; |
| DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); |
| #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ |
| |
| #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN) |
| extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); |
| #endif |
| #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) |
| static void dhd_hang_process(void *dhd_info, void *event_data, u8 event); |
| #endif |
| #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) |
| MODULE_LICENSE("GPL and additional rights"); |
| #endif /* LinuxVer */ |
| |
| #ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG |
| #define MAX_CONSECUTIVE_HANG_COUNTS 5 |
| #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ |
| |
| #include <dhd_bus.h> |
| |
| #ifdef DHD_ULP |
| #include <dhd_ulp.h> |
| #endif /* DHD_ULP */ |
| |
| #ifdef BCM_FD_AGGR |
| #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE) |
| #else |
| #ifndef PROP_TXSTATUS |
| #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen) |
| #else |
| #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128) |
| #endif |
| #endif /* BCM_FD_AGGR */ |
| |
| #ifdef PROP_TXSTATUS |
| extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx); |
| extern void dhd_wlfc_plat_init(void *dhd); |
| extern void dhd_wlfc_plat_deinit(void *dhd); |
| #endif /* PROP_TXSTATUS */ |
| extern uint sd_f2_blocksize; |
| extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size); |
| |
| #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) |
| const char * |
| print_tainted() |
| { |
| return ""; |
| } |
| #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */ |
| |
| /* Linux wireless extension support */ |
| #if defined(WL_WIRELESS_EXT) |
| #include <wl_iw.h> |
| extern wl_iw_extra_params_t g_wl_iw_params; |
| #endif /* defined(WL_WIRELESS_EXT) */ |
| |
| #ifdef CONFIG_PARTIALSUSPEND_SLP |
| #include <linux/partialsuspend_slp.h> |
| #define CONFIG_HAS_EARLYSUSPEND |
| #define DHD_USE_EARLYSUSPEND |
| #define register_early_suspend register_pre_suspend |
| #define unregister_early_suspend unregister_pre_suspend |
| #define early_suspend pre_suspend |
| #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50 |
| #else |
| #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) |
| #include <linux/earlysuspend.h> |
| #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ |
| #endif /* CONFIG_PARTIALSUSPEND_SLP */ |
| |
| #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) |
| #include <linux/nl80211.h> |
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
| |
| #if defined(BCMPCIE) |
| extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval); |
| #else |
| extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); |
#endif /* BCMPCIE */
| |
| #ifdef PKT_FILTER_SUPPORT |
| extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); |
| extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); |
| extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id); |
| #endif |
| |
| #if defined(PKT_FILTER_SUPPORT) && defined(APF) |
| static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id, |
| u8* program, uint32 program_len); |
| static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, |
| uint32 mode, uint32 enable); |
| static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id); |
| #endif /* PKT_FILTER_SUPPORT && APF */ |
| |
| |
| |
| static INLINE int argos_register_notifier_init(struct net_device *net) { return 0;} |
| static INLINE int argos_register_notifier_deinit(void) { return 0;} |
| |
| #if defined(BT_OVER_SDIO) |
| extern void wl_android_set_wifi_on_flag(bool enable); |
| #endif /* BT_OVER_SDIO */ |
| |
| |
| #if defined(TRAFFIC_MGMT_DWM) |
| void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf); |
| #endif |
| |
| #ifdef DHD_FW_COREDUMP |
| static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event); |
| #endif /* DHD_FW_COREDUMP */ |
| #ifdef DHD_LOG_DUMP |
| #define DLD_BUFFER_NUM 2 |
| /* [0]: General, [1]: Special */ |
| struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM]; |
| static const int dld_buf_size[] = { |
| (1024 * 1024), /* DHD_LOG_DUMP_BUFFER_SIZE */ |
| (8 * 1024) /* DHD_LOG_DUMP_BUFFER_EX_SIZE */ |
| }; |
| static void dhd_log_dump_init(dhd_pub_t *dhd); |
| static void dhd_log_dump_deinit(dhd_pub_t *dhd); |
| static void dhd_log_dump(void *handle, void *event_info, u8 event); |
| void dhd_schedule_log_dump(dhd_pub_t *dhdp); |
| static int do_dhd_log_dump(dhd_pub_t *dhdp); |
| #endif /* DHD_LOG_DUMP */ |
| |
| #ifdef DHD_DEBUG_UART |
| #include <linux/kmod.h> |
| #define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu" |
| static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event); |
| static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd); |
| #endif /* DHD_DEBUG_UART */ |
| |
| static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused); |
| static struct notifier_block dhd_reboot_notifier = { |
| .notifier_call = dhd_reboot_callback, |
| .priority = 1, |
| }; |
| |
| #ifdef BCMPCIE |
| static int is_reboot = 0; |
| #endif /* BCMPCIE */ |
| |
| #if defined(BT_OVER_SDIO) |
| #include "dhd_bt_interface.h" |
| dhd_pub_t *g_dhd_pub = NULL; |
| #endif /* defined (BT_OVER_SDIO) */ |
| |
| atomic_t exit_in_progress = ATOMIC_INIT(0); |
| |
| typedef struct dhd_if_event { |
| struct list_head list; |
| wl_event_data_if_t event; |
| char name[IFNAMSIZ+1]; |
| uint8 mac[ETHER_ADDR_LEN]; |
| } dhd_if_event_t; |
| |
| /* Interface control information */ |
| typedef struct dhd_if { |
| struct dhd_info *info; /* back pointer to dhd_info */ |
| /* OS/stack specifics */ |
| struct net_device *net; |
| int idx; /* iface idx in dongle */ |
| uint subunit; /* subunit */ |
| uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ |
| bool set_macaddress; |
| bool set_multicast; |
| uint8 bssidx; /* bsscfg index for the interface */ |
| bool attached; /* Delayed attachment when unset */ |
| bool txflowcontrol; /* Per interface flow control indicator */ |
| char name[IFNAMSIZ+1]; /* linux interface name */ |
| char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */ |
| struct net_device_stats stats; |
| #ifdef DHD_WMF |
| dhd_wmf_t wmf; /* per bsscfg wmf setting */ |
	bool wmf_psta_disable;		/* enable/disable sending MC packets to
					 * each MAC of the MC group behind a PSTA
					 */
| #endif /* DHD_WMF */ |
| #ifdef PCIE_FULL_DONGLE |
| struct list_head sta_list; /* sll of associated stations */ |
| #if !defined(BCM_GMAC3) |
| spinlock_t sta_list_lock; /* lock for manipulating sll */ |
| #endif /* ! BCM_GMAC3 */ |
| #endif /* PCIE_FULL_DONGLE */ |
| uint32 ap_isolate; /* ap-isolation settings */ |
| #ifdef DHD_L2_FILTER |
| bool parp_enable; |
| bool parp_discard; |
| bool parp_allnode; |
| arp_table_t *phnd_arp_table; |
| /* for Per BSS modification */ |
| bool dhcp_unicast; |
| bool block_ping; |
| bool grat_arp; |
| #endif /* DHD_L2_FILTER */ |
| #ifdef DHD_MCAST_REGEN |
| bool mcast_regen_bss_enable; |
| #endif |
	bool rx_pkt_chainable;		/* set all rx packets chainable by default */
	cumm_ctr_t cumm_ctr;		/* cumulative queue length of child flowrings */
| } dhd_if_t; |
| |
| #ifdef WLMEDIA_HTSF |
| typedef struct { |
| uint32 low; |
| uint32 high; |
| } tsf_t; |
| |
| typedef struct { |
| uint32 last_cycle; |
| uint32 last_sec; |
| uint32 last_tsf; |
| uint32 coef; /* scaling factor */ |
| uint32 coefdec1; /* first decimal */ |
| uint32 coefdec2; /* second decimal */ |
| } htsf_t; |
| |
| typedef struct { |
| uint32 t1; |
| uint32 t2; |
| uint32 t3; |
| uint32 t4; |
| } tstamp_t; |
| |
| static tstamp_t ts[TSMAX]; |
| static tstamp_t maxdelayts; |
| static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0; |
| |
| #endif /* WLMEDIA_HTSF */ |
| |
| struct ipv6_work_info_t { |
| uint8 if_idx; |
| char ipv6_addr[IPV6_ADDR_LEN]; |
| unsigned long event; |
| }; |
| static void dhd_process_daemon_msg(struct sk_buff *skb); |
| static void dhd_destroy_to_notifier_skt(void); |
| static int dhd_create_to_notifier_skt(void); |
| static struct sock *nl_to_event_sk = NULL; |
| int sender_pid = 0; |
| |
| #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) |
| struct netlink_kernel_cfg g_cfg = { |
| .groups = 1, |
| .input = dhd_process_daemon_msg, |
| }; |
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
| |
| typedef struct dhd_dump { |
| uint8 *buf; |
| int bufsize; |
| } dhd_dump_t; |
| |
| |
/* When Perimeter locks are deployed, any blocking calls must be preceded
 * by a PERIM UNLOCK and followed by a PERIM LOCK.
 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
 * wait_event_timeout().
 */
| |
| /* Local private structure (extension of pub) */ |
| typedef struct dhd_info { |
| #if defined(WL_WIRELESS_EXT) |
| wl_iw_t iw; /* wireless extensions state (must be first) */ |
| #endif /* defined(WL_WIRELESS_EXT) */ |
| dhd_pub_t pub; |
| dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */ |
| |
| void *adapter; /* adapter information, interrupt, fw path etc. */ |
| char fw_path[PATH_MAX]; /* path to firmware image */ |
| char nv_path[PATH_MAX]; /* path to nvram vars file */ |
| char clm_path[PATH_MAX]; /* path to clm vars file */ |
| char conf_path[PATH_MAX]; /* path to config vars file */ |
| #ifdef DHD_UCODE_DOWNLOAD |
| char uc_path[PATH_MAX]; /* path to ucode image */ |
| #endif /* DHD_UCODE_DOWNLOAD */ |
| |
| /* serialize dhd iovars */ |
| struct mutex dhd_iovar_mutex; |
| |
| struct semaphore proto_sem; |
| #ifdef PROP_TXSTATUS |
| spinlock_t wlfc_spinlock; |
| |
| #endif /* PROP_TXSTATUS */ |
| #ifdef WLMEDIA_HTSF |
| htsf_t htsf; |
| #endif |
| wait_queue_head_t ioctl_resp_wait; |
| wait_queue_head_t d3ack_wait; |
| wait_queue_head_t dhd_bus_busy_state_wait; |
| uint32 default_wd_interval; |
| |
| struct timer_list timer; |
| bool wd_timer_valid; |
| #ifdef DHD_PCIE_RUNTIMEPM |
| struct timer_list rpm_timer; |
| bool rpm_timer_valid; |
| tsk_ctl_t thr_rpm_ctl; |
| #endif /* DHD_PCIE_RUNTIMEPM */ |
| struct tasklet_struct tasklet; |
| spinlock_t sdlock; |
| spinlock_t txqlock; |
| spinlock_t rxqlock; |
| spinlock_t dhd_lock; |
| |
| struct semaphore sdsem; |
| tsk_ctl_t thr_dpc_ctl; |
| tsk_ctl_t thr_wdt_ctl; |
| |
| tsk_ctl_t thr_rxf_ctl; |
| spinlock_t rxf_lock; |
| bool rxthread_enabled; |
| |
| /* Wakelocks */ |
| #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) |
| struct wake_lock wl_wifi; /* Wifi wakelock */ |
| struct wake_lock wl_rxwake; /* Wifi rx wakelock */ |
| struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ |
| struct wake_lock wl_wdwake; /* Wifi wd wakelock */ |
| struct wake_lock wl_evtwake; /* Wifi event wakelock */ |
| struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */ |
| struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */ |
| #ifdef BCMPCIE_OOB_HOST_WAKE |
| struct wake_lock wl_intrwake; /* Host wakeup wakelock */ |
| #endif /* BCMPCIE_OOB_HOST_WAKE */ |
| #ifdef DHD_USE_SCAN_WAKELOCK |
| struct wake_lock wl_scanwake; /* Wifi scan wakelock */ |
| #endif /* DHD_USE_SCAN_WAKELOCK */ |
| #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ |
| |
| #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) |
	/* net_device interface lock, prevents race conditions between net_dev
	 * interface calls and wifi_on or wifi_off
	 */
| struct mutex dhd_net_if_mutex; |
| struct mutex dhd_suspend_mutex; |
| #if defined(PKT_FILTER_SUPPORT) && defined(APF) |
| struct mutex dhd_apf_mutex; |
| #endif /* PKT_FILTER_SUPPORT && APF */ |
| #endif |
| spinlock_t wakelock_spinlock; |
| spinlock_t wakelock_evt_spinlock; |
| uint32 wakelock_counter; |
| int wakelock_wd_counter; |
| int wakelock_rx_timeout_enable; |
| int wakelock_ctrl_timeout_enable; |
| bool waive_wakelock; |
| uint32 wakelock_before_waive; |
| |
| /* Thread to issue ioctl for multicast */ |
| wait_queue_head_t ctrl_wait; |
| atomic_t pend_8021x_cnt; |
| dhd_attach_states_t dhd_state; |
| #ifdef SHOW_LOGTRACE |
| dhd_event_log_t event_data; |
| #endif /* SHOW_LOGTRACE */ |
| |
| #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) |
| struct early_suspend early_suspend; |
| #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ |
| |
| #ifdef ARP_OFFLOAD_SUPPORT |
| u32 pend_ipaddr; |
| #endif /* ARP_OFFLOAD_SUPPORT */ |
| #ifdef BCM_FD_AGGR |
| void *rpc_th; |
| void *rpc_osh; |
| struct timer_list rpcth_timer; |
| bool rpcth_timer_active; |
| uint8 fdaggr; |
| #endif |
| #ifdef DHDTCPACK_SUPPRESS |
| spinlock_t tcpack_lock; |
| #endif /* DHDTCPACK_SUPPRESS */ |
| #ifdef FIX_CPU_MIN_CLOCK |
| bool cpufreq_fix_status; |
| struct mutex cpufreq_fix; |
| struct pm_qos_request dhd_cpu_qos; |
| #ifdef FIX_BUS_MIN_CLOCK |
| struct pm_qos_request dhd_bus_qos; |
| #endif /* FIX_BUS_MIN_CLOCK */ |
| #endif /* FIX_CPU_MIN_CLOCK */ |
| void *dhd_deferred_wq; |
| #ifdef DEBUG_CPU_FREQ |
| struct notifier_block freq_trans; |
| int __percpu *new_freq; |
| #endif |
| unsigned int unit; |
| struct notifier_block pm_notifier; |
| #ifdef DHD_PSTA |
| uint32 psta_mode; /* PSTA or PSR */ |
| #endif /* DHD_PSTA */ |
| #ifdef DHD_WET |
| uint32 wet_mode; |
| #endif /* DHD_WET */ |
| #ifdef DHD_DEBUG |
| dhd_dump_t *dump; |
| struct timer_list join_timer; |
| u32 join_timeout_val; |
| bool join_timer_active; |
| uint scan_time_count; |
| struct timer_list scan_timer; |
| bool scan_timer_active; |
| #endif |
| #if defined(DHD_LB) |
| /* CPU Load Balance dynamic CPU selection */ |
| |
	/* Variable that tracks the current CPUs available for candidacy */
| cpumask_var_t cpumask_curr_avail; |
| |
| /* Primary and secondary CPU mask */ |
| cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */ |
| cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */ |
| |
| struct notifier_block cpu_notifier; |
| |
| /* Tasklet to handle Tx Completion packet freeing */ |
| struct tasklet_struct tx_compl_tasklet; |
| atomic_t tx_compl_cpu; |
| |
| /* Tasklet to handle RxBuf Post during Rx completion */ |
| struct tasklet_struct rx_compl_tasklet; |
| atomic_t rx_compl_cpu; |
| |
	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is
	 * then appended to rx_napi_queue (with the lock held) and the
	 * rx_napi_struct is scheduled to run on rx_napi_cpu.
	 */
| struct sk_buff_head rx_pend_queue ____cacheline_aligned; |
| struct sk_buff_head rx_napi_queue ____cacheline_aligned; |
| struct napi_struct rx_napi_struct ____cacheline_aligned; |
| atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ |
| struct net_device *rx_napi_netdev; /* netdev of primary interface */ |
| |
| struct work_struct rx_napi_dispatcher_work; |
| struct work_struct tx_compl_dispatcher_work; |
| struct work_struct tx_dispatcher_work; |
| |
| /* Number of times DPC Tasklet ran */ |
| uint32 dhd_dpc_cnt; |
| /* Number of times NAPI processing got scheduled */ |
| uint32 napi_sched_cnt; |
| /* Number of times NAPI processing ran on each available core */ |
| uint32 *napi_percpu_run_cnt; |
| /* Number of times RX Completions got scheduled */ |
| uint32 rxc_sched_cnt; |
| /* Number of times RX Completion ran on each available core */ |
| uint32 *rxc_percpu_run_cnt; |
| /* Number of times TX Completions got scheduled */ |
| uint32 txc_sched_cnt; |
| /* Number of times TX Completions ran on each available core */ |
| uint32 *txc_percpu_run_cnt; |
| /* CPU status */ |
| /* Number of times each CPU came online */ |
| uint32 *cpu_online_cnt; |
| /* Number of times each CPU went offline */ |
| uint32 *cpu_offline_cnt; |
| |
| /* Number of times TX processing run on each core */ |
| uint32 *txp_percpu_run_cnt; |
| /* Number of times TX start run on each core */ |
| uint32 *tx_start_percpu_run_cnt; |
| |
| /* Tx load balancing */ |
| |
	/* TODO: Need to see if batch processing is really required in case of TX
	 * processing. In case of RX the dongle can send a bunch of rx completions,
	 * hence we took a 3-queue approach:
	 * enqueue - adds the skbs to rx_pend_queue
	 * dispatch - takes a lock and adds the list of skbs from the pend queue
	 * to the napi queue
	 * napi processing - copies the pend_queue into a local queue and works
	 * on it.
	 * But for TX it is going to be 1 skb at a time, so we are just thinking
	 * of using only one queue and using the lock-protected skb queue functions
	 * to add and process it. If that proves inefficient we'll revisit the queue
	 * design.
	 */
| |
| /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */ |
| /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */ |
| /* |
| * From the Tasklet that actually sends out data |
| * copy the list tx_pend_queue into tx_active_queue. There by we need |
| * to spinlock to only perform the copy the rest of the code ie to |
| * construct the tx_pend_queue and the code to process tx_active_queue |
| * can be lockless. The concept is borrowed as is from RX processing |
| */ |
| /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */ |
| |
	/* Controls TXP at runtime, enabled by default */
| atomic_t lb_txp_active; |
| |
| /* |
| * When the NET_TX tries to send a TX packet put it into tx_pend_queue |
| * For now, the processing tasklet will also direcly operate on this |
| * queue |
| */ |
| struct sk_buff_head tx_pend_queue ____cacheline_aligned; |
| |
	/* cpu on which the DHD Tx is happening */
| atomic_t tx_cpu; |
| |
| /* CPU on which the Network stack is calling the DHD's xmit function */ |
| atomic_t net_tx_cpu; |
| |
| /* Tasklet context from which the DHD's TX processing happens */ |
| struct tasklet_struct tx_tasklet; |
| |
| /* |
| * Consumer Histogram - NAPI RX Packet processing |
| * ----------------------------------------------- |
	 * On each CPU, whenever the NAPI Rx packet-processing callback runs,
	 * the number of packets it processed is captured in this structure.
	 * It is difficult to capture the "exact" number of packets processed,
	 * so, with the packet counter being 32 bits wide, we keep a bucket of
	 * HIST_BIN_SIZE (9) bins: the count is rounded up to the next power of
	 * 2 (2^0 .. 2^7) and the matching bin is incremented; anything larger
	 * than 2^7 falls into the last (overflow) bin.
	 * For example, assume that on CPU 1 NAPI Rx runs 3 times
	 * and the packet count processed is as follows (assume the bin counters are 0)
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
| uint32 *napi_rx_hist[HIST_BIN_SIZE]; |
| uint32 *txc_hist[HIST_BIN_SIZE]; |
| uint32 *rxc_hist[HIST_BIN_SIZE]; |
| #endif /* DHD_LB */ |
| |
| #ifdef SHOW_LOGTRACE |
| struct work_struct event_log_dispatcher_work; |
| #endif /* SHOW_LOGTRACE */ |
| |
| #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) |
| #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ |
| struct kobject dhd_kobj; |
| #ifdef SHOW_LOGTRACE |
| struct sk_buff_head evt_trace_queue ____cacheline_aligned; |
| #endif |
| struct timer_list timesync_timer; |
| #if defined(BT_OVER_SDIO) |
| char btfw_path[PATH_MAX]; |
| #endif /* defined (BT_OVER_SDIO) */ |
| |
| #ifdef WL_MONITOR |
| struct net_device *monitor_dev; /* monitor pseudo device */ |
| struct sk_buff *monitor_skb; |
| uint monitor_len; |
	uint monitor_type;   /* monitor pseudo device type */
| monitor_info_t *monitor_info; |
| #endif /* WL_MONITOR */ |
| uint32 shub_enable; |
| #if defined(BT_OVER_SDIO) |
| struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */ |
| int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */ |
| #endif /* BT_OVER_SDIO */ |
| #ifdef DHD_DEBUG_UART |
| bool duart_execute; |
| #endif |
| #ifdef PCIE_INB_DW |
| wait_queue_head_t ds_exit_wait; |
| #endif /* PCIE_INB_DW */ |
| } dhd_info_t; |
| |
| #ifdef WL_MONITOR |
| #define MONPKT_EXTRA_LEN 48 |
| #endif |
| |
| #define DHDIF_FWDER(dhdif) FALSE |
| |
| #if defined(BT_OVER_SDIO) |
| /* Flag to indicate if driver is initialized */ |
| uint dhd_driver_init_done = TRUE; |
| #else |
| /* Flag to indicate if driver is initialized */ |
| uint dhd_driver_init_done = FALSE; |
| #endif |
| /* Flag to indicate if we should download firmware on driver load */ |
| uint dhd_download_fw_on_driverload = TRUE; |
| |
| /* Definitions to provide path to the firmware and nvram |
| * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" |
| */ |
| char firmware_path[MOD_PARAM_PATHLEN]; |
| char nvram_path[MOD_PARAM_PATHLEN]; |
| char clm_path[MOD_PARAM_PATHLEN]; |
| char config_path[MOD_PARAM_PATHLEN]; |
| #ifdef DHD_UCODE_DOWNLOAD |
| char ucode_path[MOD_PARAM_PATHLEN]; |
| #endif /* DHD_UCODE_DOWNLOAD */ |
| |
| module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660); |
| |
| |
| /* backup buffer for firmware and nvram path */ |
| char fw_bak_path[MOD_PARAM_PATHLEN]; |
| char nv_bak_path[MOD_PARAM_PATHLEN]; |
| |
/* information string to keep firmware, chip, and chip revision info visible in the log */
| char info_string[MOD_PARAM_INFOLEN]; |
| module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444); |
| int op_mode = 0; |
| int disable_proptx = 0; |
| module_param(op_mode, int, 0644); |
| extern int wl_control_wl_start(struct net_device *dev); |
| #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC) |
| struct semaphore dhd_registration_sem; |
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && BCMLXSDMMC */
| |
| /* deferred handlers */ |
| static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event); |
| static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event); |
| static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event); |
| static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event); |
| |
| #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) |
| static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event); |
| #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ |
| #ifdef WL_CFG80211 |
| extern void dhd_netdev_free(struct net_device *ndev); |
| #endif /* WL_CFG80211 */ |
| |
| #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER)) |
| /* update rx_pkt_chainable state of dhd interface */ |
| static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx); |
| #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */ |
| |
| #ifdef HOFFLOAD_MODULES |
| char dhd_hmem_module_string[MOD_PARAM_SRLEN]; |
| module_param_string(dhd_hmem_module_string, dhd_hmem_module_string, MOD_PARAM_SRLEN, 0660); |
| #endif |
| /* Error bits */ |
| module_param(dhd_msg_level, int, 0); |
| #if defined(WL_WIRELESS_EXT) |
| module_param(iw_msg_level, int, 0); |
| #endif |
| #ifdef WL_CFG80211 |
| module_param(wl_dbg_level, int, 0); |
| #endif |
| module_param(android_msg_level, int, 0); |
| module_param(config_msg_level, int, 0); |
| |
| #ifdef ARP_OFFLOAD_SUPPORT |
| /* ARP offload enable */ |
| uint dhd_arp_enable = TRUE; |
| module_param(dhd_arp_enable, uint, 0); |
| |
| /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ |
| |
| #ifdef ENABLE_ARP_SNOOP_MODE |
| uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY; |
| #else |
| uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY; |
| #endif /* ENABLE_ARP_SNOOP_MODE */ |
| |
| module_param(dhd_arp_mode, uint, 0); |
| #endif /* ARP_OFFLOAD_SUPPORT */ |
| |
| /* Disable Prop tx */ |
| module_param(disable_proptx, int, 0644); |
| /* load firmware and/or nvram values from the filesystem */ |
| module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); |
| module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660); |
| module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0); |
| #ifdef DHD_UCODE_DOWNLOAD |
| module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660); |
| #endif /* DHD_UCODE_DOWNLOAD */ |
| |
| /* Watchdog interval */ |
| |
| /* extend watchdog expiration to 2 seconds when DPC is running */ |
| #define WATCHDOG_EXTEND_INTERVAL (2000) |
| |
| uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS; |
| module_param(dhd_watchdog_ms, uint, 0); |
| |
| #ifdef DHD_PCIE_RUNTIMEPM |
| uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS; |
#endif /* DHD_PCIE_RUNTIMEPM */
| #if defined(DHD_DEBUG) |
| /* Console poll interval */ |
| uint dhd_console_ms = 0; |
| module_param(dhd_console_ms, uint, 0644); |
| #else |
| uint dhd_console_ms = 0; |
| #endif /* DHD_DEBUG */ |
| |
| uint dhd_slpauto = TRUE; |
| module_param(dhd_slpauto, uint, 0); |
| |
| #ifdef PKT_FILTER_SUPPORT |
| /* Global Pkt filter enable control */ |
| uint dhd_pkt_filter_enable = TRUE; |
| module_param(dhd_pkt_filter_enable, uint, 0); |
| #endif |
| |
| /* Pkt filter init setup */ |
| uint dhd_pkt_filter_init = 0; |
| module_param(dhd_pkt_filter_init, uint, 0); |
| |
| /* Pkt filter mode control */ |
| #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER |
| uint dhd_master_mode = FALSE; |
| #else |
| uint dhd_master_mode = FALSE; |
| #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ |
| module_param(dhd_master_mode, uint, 0); |
| |
| int dhd_watchdog_prio = 0; |
| module_param(dhd_watchdog_prio, int, 0); |
| |
| /* DPC thread priority */ |
| int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING; |
| module_param(dhd_dpc_prio, int, 0); |
| |
| /* RX frame thread priority */ |
| int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING; |
| module_param(dhd_rxf_prio, int, 0); |
| |
| #if !defined(BCMDHDUSB) |
| extern int dhd_dongle_ramsize; |
| module_param(dhd_dongle_ramsize, int, 0); |
#endif /* !BCMDHDUSB */
| |
| #ifdef WL_CFG80211 |
| int passive_channel_skip = 0; |
| module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR)); |
| #endif /* WL_CFG80211 */ |
| |
| /* Keep track of number of instances */ |
| static int dhd_found = 0; |
| static int instance_base = 0; /* Starting instance number */ |
| module_param(instance_base, int, 0644); |
| |
| #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE) |
| static int dhd_napi_weight = 32; |
| module_param(dhd_napi_weight, int, 0644); |
| #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */ |
| |
| #ifdef PCIE_FULL_DONGLE |
| extern int h2d_max_txpost; |
| module_param(h2d_max_txpost, int, 0644); |
| #endif /* PCIE_FULL_DONGLE */ |
| |
| #ifdef DHD_DHCP_DUMP |
| struct bootp_fmt { |
| struct iphdr ip_header; |
| struct udphdr udp_header; |
| uint8 op; |
| uint8 htype; |
| uint8 hlen; |
| uint8 hops; |
| uint32 transaction_id; |
| uint16 secs; |
| uint16 flags; |
| uint32 client_ip; |
| uint32 assigned_ip; |
| uint32 server_ip; |
| uint32 relay_ip; |
| uint8 hw_address[16]; |
| uint8 server_name[64]; |
| uint8 file_name[128]; |
| uint8 options[312]; |
| }; |
| |
| static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 }; |
| static const char dhcp_ops[][10] = { |
| "NA", "REQUEST", "REPLY" |
| }; |
| static const char dhcp_types[][10] = { |
| "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM" |
| }; |
| static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx); |
| #endif /* DHD_DHCP_DUMP */ |
| |
| #ifdef DHD_ICMP_DUMP |
| #include <net/icmp.h> |
| static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx); |
| #endif /* DHD_ICMP_DUMP */ |
| |
| /* Functions to manage sysfs interface for dhd */ |
| static int dhd_sysfs_init(dhd_info_t *dhd); |
| static void dhd_sysfs_exit(dhd_info_t *dhd); |
| |
| #ifdef SHOW_LOGTRACE |
| #if defined(CUSTOMER_HW4_DEBUG) |
| static char *logstrs_path = PLATFORM_PATH"logstrs.bin"; |
| static char *st_str_file_path = PLATFORM_PATH"rtecdc.bin"; |
| static char *map_file_path = PLATFORM_PATH"rtecdc.map"; |
| static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin"; |
| static char *rom_map_file_path = PLATFORM_PATH"roml.map"; |
| #elif defined(CUSTOMER_HW2) |
| static char *logstrs_path = "/data/misc/wifi/logstrs.bin"; |
| static char *st_str_file_path = "/data/misc/wifi/rtecdc.bin"; |
| static char *map_file_path = "/data/misc/wifi/rtecdc.map"; |
| static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin"; |
| static char *rom_map_file_path = "/data/misc/wifi/roml.map"; |
| #else |
| static char *logstrs_path = "/installmedia/logstrs.bin"; |
| static char *st_str_file_path = "/installmedia/rtecdc.bin"; |
| static char *map_file_path = "/installmedia/rtecdc.map"; |
| static char *rom_st_str_file_path = "/installmedia/roml.bin"; |
| static char *rom_map_file_path = "/installmedia/roml.map"; |
| #endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 */ |
| static char *ram_file_str = "rtecdc"; |
| static char *rom_file_str = "roml"; |
| |
| module_param(logstrs_path, charp, S_IRUGO); |
| module_param(st_str_file_path, charp, S_IRUGO); |
| module_param(map_file_path, charp, S_IRUGO); |
| module_param(rom_st_str_file_path, charp, S_IRUGO); |
| module_param(rom_map_file_path, charp, S_IRUGO); |
| |
| static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp); |
| static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start, |
| uint32 *rodata_end); |
| static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, |
| char *map_file); |
| #endif /* SHOW_LOGTRACE */ |
| |
| #if defined(DHD_LB) |
| |
| static void |
| dhd_lb_set_default_cpus(dhd_info_t *dhd) |
| { |
| /* Default CPU allocation for the jobs */ |
| atomic_set(&dhd->rx_napi_cpu, 1); |
| atomic_set(&dhd->rx_compl_cpu, 2); |
| atomic_set(&dhd->tx_compl_cpu, 2); |
| atomic_set(&dhd->tx_cpu, 2); |
| atomic_set(&dhd->net_tx_cpu, 0); |
| } |
| |
| static void |
| dhd_cpumasks_deinit(dhd_info_t *dhd) |
| { |
| free_cpumask_var(dhd->cpumask_curr_avail); |
| free_cpumask_var(dhd->cpumask_primary); |
| free_cpumask_var(dhd->cpumask_primary_new); |
| free_cpumask_var(dhd->cpumask_secondary); |
| free_cpumask_var(dhd->cpumask_secondary_new); |
| } |
| |
| static int |
| dhd_cpumasks_init(dhd_info_t *dhd) |
| { |
| int id; |
| uint32 cpus, num_cpus = num_possible_cpus(); |
| int ret = 0; |
| |
| DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__, |
| DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS)); |
| |
| if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) || |
| !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || |
| !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || |
| !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || |
| !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { |
| DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__)); |
| ret = -ENOMEM; |
| goto fail; |
| } |
| |
| cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask); |
| cpumask_clear(dhd->cpumask_primary); |
| cpumask_clear(dhd->cpumask_secondary); |
| |
| if (num_cpus > 32) { |
| DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus)); |
| ASSERT(0); |
| } |
| |
| cpus = DHD_LB_PRIMARY_CPUS; |
| for (id = 0; id < num_cpus; id++) { |
| if (isset(&cpus, id)) |
| cpumask_set_cpu(id, dhd->cpumask_primary); |
| } |
| |
| cpus = DHD_LB_SECONDARY_CPUS; |
| for (id = 0; id < num_cpus; id++) { |
| if (isset(&cpus, id)) |
| cpumask_set_cpu(id, dhd->cpumask_secondary); |
| } |
| |
| return ret; |
| fail: |
| dhd_cpumasks_deinit(dhd); |
| return ret; |
| } |
| |
| /* |
| * The CPU Candidacy Algorithm |
| * ~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
| * The available CPUs for selection are divided into two groups |
| * Primary Set - A CPU mask that carries the First Choice CPUs |
| * Secondary Set - A CPU mask that carries the Second Choice CPUs. |
| * |
| * There are two types of Job, that needs to be assigned to |
| * the CPUs, from one of the above mentioned CPU group. The Jobs are |
| * 1) Rx Packet Processing - napi_cpu |
| * 2) Completion Processiong (Tx, RX) - compl_cpu |
| * |
| * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes |
| * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy |
| * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu. |
| * If there are more processors free, it assigns one to compl_cpu. |
| * It also tries to ensure that both napi_cpu and compl_cpu are not on the same |
| * CPU, as much as possible. |
| * |
| * By design, both Tx and Rx completion jobs are run on the same CPU core, as it |
| * would allow Tx completion skb's to be released into a local free pool from |
| * which the rx buffer posts could have been serviced. it is important to note |
| * that a Tx packet may not have a large enough buffer for rx posting. |
| */ |
| void dhd_select_cpu_candidacy(dhd_info_t *dhd) |
| { |
| uint32 primary_available_cpus; /* count of primary available cpus */ |
| uint32 secondary_available_cpus; /* count of secondary available cpus */ |
| uint32 napi_cpu = 0; /* cpu selected for napi rx processing */ |
| uint32 compl_cpu = 0; /* cpu selected for completion jobs */ |
| uint32 tx_cpu = 0; /* cpu selected for tx processing job */ |
| |
| cpumask_clear(dhd->cpumask_primary_new); |
| cpumask_clear(dhd->cpumask_secondary_new); |
| |
| /* |
| * Now select from the primary mask. Even if a Job is |
| * already running on a CPU in secondary group, we still move |
| * to primary CPU. So no conditional checks. |
| */ |
| cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary, |
| dhd->cpumask_curr_avail); |
| |
| cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary, |
| dhd->cpumask_curr_avail); |
| |
| primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new); |
| |
| if (primary_available_cpus > 0) { |
| napi_cpu = cpumask_first(dhd->cpumask_primary_new); |
| |
| /* If no further CPU is available, |
| * cpumask_next returns >= nr_cpu_ids |
| */ |
| tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new); |
| if (tx_cpu >= nr_cpu_ids) |
| tx_cpu = 0; |
| |
		/* In case there are no more CPUs, do completions & Tx on the same CPU */
| compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new); |
| if (compl_cpu >= nr_cpu_ids) |
| compl_cpu = tx_cpu; |
| } |
| |
| DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n", |
| __FUNCTION__, napi_cpu, compl_cpu, tx_cpu)); |
| |
| /* -- Now check for the CPUs from the secondary mask -- */ |
| secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new); |
| |
| DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n", |
| __FUNCTION__, secondary_available_cpus, nr_cpu_ids)); |
| |
| if (secondary_available_cpus > 0) { |
| /* At this point if napi_cpu is unassigned it means no CPU |
| * is online from Primary Group |
| */ |
| if (napi_cpu == 0) { |
| napi_cpu = cpumask_first(dhd->cpumask_secondary_new); |
| tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new); |
| compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new); |
| } else if (tx_cpu == 0) { |
| tx_cpu = cpumask_first(dhd->cpumask_secondary_new); |
| compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new); |
| } else if (compl_cpu == 0) { |
| compl_cpu = cpumask_first(dhd->cpumask_secondary_new); |
| } |
| |
| /* If no CPU was available for tx processing, choose CPU 0 */ |
| if (tx_cpu >= nr_cpu_ids) |
| tx_cpu = 0; |
| |
| /* If no CPU was available for completion, choose CPU 0 */ |
| if (compl_cpu >= nr_cpu_ids) |
| compl_cpu = 0; |
| } |
| if ((primary_available_cpus == 0) && |
| (secondary_available_cpus == 0)) { |
| /* No CPUs available from primary or secondary mask */ |
| napi_cpu = 1; |
| compl_cpu = 0; |
| tx_cpu = 2; |
| } |
| |
| DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n", |
| __FUNCTION__, napi_cpu, compl_cpu, tx_cpu)); |
| |
| ASSERT(napi_cpu < nr_cpu_ids); |
| ASSERT(compl_cpu < nr_cpu_ids); |
| ASSERT(tx_cpu < nr_cpu_ids); |
| |
| atomic_set(&dhd->rx_napi_cpu, napi_cpu); |
| atomic_set(&dhd->tx_compl_cpu, compl_cpu); |
| atomic_set(&dhd->rx_compl_cpu, compl_cpu); |
| atomic_set(&dhd->tx_cpu, tx_cpu); |
| |
| return; |
| } |
| |
| /* |
| * Function to handle CPU Hotplug notifications. |
| * One of the task it does is to trigger the CPU Candidacy algorithm |
| * for load balancing. |
| */ |
| int |
| dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| { |
| unsigned long int cpu = (unsigned long int)hcpu; |
| |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| |
| if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) { |
| DHD_INFO(("%s(): LB data is not initialized yet.\n", |
| __FUNCTION__)); |
| return NOTIFY_BAD; |
| } |
| |
| switch (action) |
| { |
| case CPU_ONLINE: |
| case CPU_ONLINE_FROZEN: |
| DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); |
| cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); |
| dhd_select_cpu_candidacy(dhd); |
| break; |
| |
| case CPU_DOWN_PREPARE: |
| case CPU_DOWN_PREPARE_FROZEN: |
| DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]); |
| cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail); |
| dhd_select_cpu_candidacy(dhd); |
| break; |
| default: |
| break; |
| } |
| |
| return NOTIFY_OK; |
| } |
| |
| #if defined(DHD_LB_STATS) |
| void dhd_lb_stats_init(dhd_pub_t *dhdp) |
| { |
| dhd_info_t *dhd; |
| int i, j, num_cpus = num_possible_cpus(); |
| int alloc_size = sizeof(uint32) * num_cpus; |
| |
| if (dhdp == NULL) { |
| DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", |
| __FUNCTION__)); |
| return; |
| } |
| |
| dhd = dhdp->info; |
| if (dhd == NULL) { |
| DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); |
| return; |
| } |
| |
| DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt); |
| DHD_LB_STATS_CLR(dhd->napi_sched_cnt); |
| |
| dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->napi_percpu_run_cnt) { |
| DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n", |
| __FUNCTION__)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) |
| DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); |
| |
| DHD_LB_STATS_CLR(dhd->rxc_sched_cnt); |
| |
| dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->rxc_percpu_run_cnt) { |
| DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n", |
| __FUNCTION__)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) |
| DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); |
| |
| DHD_LB_STATS_CLR(dhd->txc_sched_cnt); |
| |
| dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->txc_percpu_run_cnt) { |
| DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n", |
| __FUNCTION__)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) |
| DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]); |
| |
| dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->cpu_online_cnt) { |
| DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n", |
| __FUNCTION__)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) |
| DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]); |
| |
| dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->cpu_offline_cnt) { |
| DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n", |
| __FUNCTION__)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) |
| DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]); |
| |
| dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->txp_percpu_run_cnt) { |
| DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n", |
| __FUNCTION__)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) |
| DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); |
| |
| dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->tx_start_percpu_run_cnt) { |
| DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n", |
| __FUNCTION__)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) |
| DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); |
| |
| for (j = 0; j < HIST_BIN_SIZE; j++) { |
| dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->napi_rx_hist[j]) { |
| DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n", |
| __FUNCTION__, j)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) { |
| DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]); |
| } |
| } |
| #ifdef DHD_LB_TXC |
| for (j = 0; j < HIST_BIN_SIZE; j++) { |
| dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->txc_hist[j]) { |
| DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n", |
| __FUNCTION__, j)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) { |
| DHD_LB_STATS_CLR(dhd->txc_hist[j][i]); |
| } |
| } |
| #endif /* DHD_LB_TXC */ |
| #ifdef DHD_LB_RXC |
| for (j = 0; j < HIST_BIN_SIZE; j++) { |
| dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); |
| if (!dhd->rxc_hist[j]) { |
| DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n", |
| __FUNCTION__, j)); |
| return; |
| } |
| for (i = 0; i < num_cpus; i++) { |
| DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]); |
| } |
| } |
| #endif /* DHD_LB_RXC */ |
| return; |
| } |
| |
| void dhd_lb_stats_deinit(dhd_pub_t *dhdp) |
| { |
| dhd_info_t *dhd; |
| int j, num_cpus = num_possible_cpus(); |
| int alloc_size = sizeof(uint32) * num_cpus; |
| |
| if (dhdp == NULL) { |
| DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", |
| __FUNCTION__)); |
| return; |
| } |
| |
| dhd = dhdp->info; |
| if (dhd == NULL) { |
| DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); |
| return; |
| } |
| |
| if (dhd->napi_percpu_run_cnt) { |
| MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size); |
| dhd->napi_percpu_run_cnt = NULL; |
| } |
| if (dhd->rxc_percpu_run_cnt) { |
| MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size); |
| dhd->rxc_percpu_run_cnt = NULL; |
| } |
| if (dhd->txc_percpu_run_cnt) { |
| MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size); |
| dhd->txc_percpu_run_cnt = NULL; |
| } |
| if (dhd->cpu_online_cnt) { |
| MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size); |
| dhd->cpu_online_cnt = NULL; |
| } |
| if (dhd->cpu_offline_cnt) { |
| MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size); |
| dhd->cpu_offline_cnt = NULL; |
| } |
| |
| if (dhd->txp_percpu_run_cnt) { |
| MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size); |
| dhd->txp_percpu_run_cnt = NULL; |
| } |
| if (dhd->tx_start_percpu_run_cnt) { |
| MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size); |
| dhd->tx_start_percpu_run_cnt = NULL; |
| } |
| |
| for (j = 0; j < HIST_BIN_SIZE; j++) { |
| if (dhd->napi_rx_hist[j]) { |
| MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size); |
| dhd->napi_rx_hist[j] = NULL; |
| } |
| #ifdef DHD_LB_TXC |
| if (dhd->txc_hist[j]) { |
| MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size); |
| dhd->txc_hist[j] = NULL; |
| } |
| #endif /* DHD_LB_TXC */ |
| #ifdef DHD_LB_RXC |
| if (dhd->rxc_hist[j]) { |
| MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size); |
| dhd->rxc_hist[j] = NULL; |
| } |
| #endif /* DHD_LB_RXC */ |
| } |
| |
| return; |
| } |
| |
| static void dhd_lb_stats_dump_histo( |
| struct bcmstrbuf *strbuf, uint32 **hist) |
| { |
| int i, j; |
| uint32 *per_cpu_total; |
| uint32 total = 0; |
| uint32 num_cpus = num_possible_cpus(); |
| |
| per_cpu_total = (uint32 *)kmalloc(sizeof(uint32) * num_cpus, GFP_ATOMIC); |
| if (!per_cpu_total) { |
| DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__)); |
| return; |
| } |
| bzero(per_cpu_total, sizeof(uint32) * num_cpus); |
| |
| bcm_bprintf(strbuf, "CPU: \t\t"); |
| for (i = 0; i < num_cpus; i++) |
| bcm_bprintf(strbuf, "%d\t", i); |
| bcm_bprintf(strbuf, "\nBin\n"); |
| |
| for (i = 0; i < HIST_BIN_SIZE; i++) { |
| bcm_bprintf(strbuf, "%d:\t\t", 1<<i); |
| for (j = 0; j < num_cpus; j++) { |
| bcm_bprintf(strbuf, "%d\t", hist[i][j]); |
| } |
| bcm_bprintf(strbuf, "\n"); |
| } |
| bcm_bprintf(strbuf, "Per CPU Total \t"); |
| total = 0; |
| for (i = 0; i < num_cpus; i++) { |
| for (j = 0; j < HIST_BIN_SIZE; j++) { |
| per_cpu_total[i] += (hist[j][i] * (1<<j)); |
| } |
| bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]); |
| total += per_cpu_total[i]; |
| } |
| bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total); |
| |
| kfree(per_cpu_total); |
| return; |
| } |
| |
| static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p) |
| { |
| int i, num_cpus = num_possible_cpus(); |
| |
| bcm_bprintf(strbuf, "CPU: \t"); |
| for (i = 0; i < num_cpus; i++) |
| bcm_bprintf(strbuf, "%d\t", i); |
| bcm_bprintf(strbuf, "\n"); |
| |
| bcm_bprintf(strbuf, "Val: \t"); |
| for (i = 0; i < num_cpus; i++) |
| bcm_bprintf(strbuf, "%u\t", *(p+i)); |
| bcm_bprintf(strbuf, "\n"); |
| return; |
| } |
| |
| void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) |
| { |
| dhd_info_t *dhd; |
| |
| if (dhdp == NULL || strbuf == NULL) { |
| DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n", |
| __FUNCTION__, dhdp, strbuf)); |
| return; |
| } |
| |
| dhd = dhdp->info; |
| if (dhd == NULL) { |
| DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); |
| return; |
| } |
| |
| bcm_bprintf(strbuf, "\ncpu_online_cnt:\n"); |
| dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt); |
| |
| bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n"); |
| dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt); |
| |
| bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n", |
| dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt, |
| dhd->txc_sched_cnt); |
| |
| #ifdef DHD_LB_RXP |
| bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n"); |
| dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt); |
| bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n"); |
| dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist); |
| #endif /* DHD_LB_RXP */ |
| |
| #ifdef DHD_LB_RXC |
| bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n"); |
| dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt); |
| bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n"); |
| dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist); |
| #endif /* DHD_LB_RXC */ |
| |
| #ifdef DHD_LB_TXC |
| bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n"); |
| dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt); |
| bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n"); |
| dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist); |
| #endif /* DHD_LB_TXC */ |
| |
| #ifdef DHD_LB_TXP |
| bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n"); |
| dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt); |
| |
| bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n"); |
| dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt); |
| #endif /* DHD_LB_TXP */ |
| |
| bcm_bprintf(strbuf, "\nCPU masks primary(big)=0x%x secondary(little)=0x%x\n", |
| DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS); |
| |
| bcm_bprintf(strbuf, "napi_cpu %x tx_cpu %x\n", |
| atomic_read(&dhd->rx_napi_cpu), atomic_read(&dhd->tx_cpu)); |
| |
| } |
| |
/* Given a number 'num', returns the smallest power of 2 that is >= num */
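/* e.g. 5 -> 8, 33 -> 64, 64 -> 64 (already a power of 2); note that 0 yields 0 */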
| static inline uint32 next_larger_power2(uint32 num) |
| { |
| num--; |
| num |= (num >> 1); |
| num |= (num >> 2); |
| num |= (num >> 4); |
| num |= (num >> 8); |
| num |= (num >> 16); |
| |
| return (num + 1); |
| } |
| |
| static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu) |
| { |
| uint32 bin_power; |
| uint32 *p; |
| bin_power = next_larger_power2(count); |
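	/*
	 * Bin by rounding the count up to a power of 2: bin[k] counts events
	 * with 2^(k-1) < count <= 2^k; bin[8] absorbs anything above 128.
	 */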
| |
| switch (bin_power) { |
| case 1: p = bin[0] + cpu; break; |
| case 2: p = bin[1] + cpu; break; |
| case 4: p = bin[2] + cpu; break; |
| case 8: p = bin[3] + cpu; break; |
| case 16: p = bin[4] + cpu; break; |
| case 32: p = bin[5] + cpu; break; |
| case 64: p = bin[6] + cpu; break; |
| case 128: p = bin[7] + cpu; break; |
		default: p = bin[8] + cpu; break;
| } |
| |
| *p = *p + 1; |
| return; |
| } |
| |
| extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count) |
| { |
| int cpu; |
| dhd_info_t *dhd = dhdp->info; |
| |
| cpu = get_cpu(); |
| put_cpu(); |
| dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu); |
| |
| return; |
| } |
| |
| extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count) |
| { |
| int cpu; |
| dhd_info_t *dhd = dhdp->info; |
| |
| cpu = get_cpu(); |
| put_cpu(); |
| dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu); |
| |
| return; |
| } |
| |
| extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count) |
| { |
| int cpu; |
| dhd_info_t *dhd = dhdp->info; |
| |
| cpu = get_cpu(); |
| put_cpu(); |
| dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu); |
| |
| return; |
| } |
| |
| extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp) |
| { |
| dhd_info_t *dhd = dhdp->info; |
| DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt); |
| } |
| |
| extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp) |
| { |
| dhd_info_t *dhd = dhdp->info; |
| DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt); |
| } |
| #endif /* DHD_LB_STATS */ |
| |
| #endif /* DHD_LB */ |
| |
| #if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF) |
| int g_frameburst = 1; |
| #endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */ |
| |
| static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd); |
| |
/* DHD Perimeter lock, only used in routers with bypass forwarding. */
| #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0) |
| #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0) |
| #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0) |
| |
| #ifdef PCIE_FULL_DONGLE |
| #if defined(BCM_GMAC3) |
| #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0) |
| #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) |
| #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) |
| |
| #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) |
| #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; }) |
| #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); }) |
| #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ |
| |
| #else /* ! BCM_GMAC3 */ |
| #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) |
| #define DHD_IF_STA_LIST_LOCK(ifp, flags) \ |
| spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) |
| #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \ |
| spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) |
| |
| #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) |
| static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, |
| struct list_head *snapshot_list); |
| static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list); |
| #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); }) |
| #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); }) |
| #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ |
| |
| #endif /* ! BCM_GMAC3 */ |
| #endif /* PCIE_FULL_DONGLE */ |
| |
| /* Control fw roaming */ |
| uint dhd_roam_disable = 0; |
| |
| #ifdef BCMDBGFS |
| extern void dhd_dbgfs_init(dhd_pub_t *dhdp); |
| extern void dhd_dbgfs_remove(void); |
| #endif |
| |
| |
| /* Control radio state */ |
| uint dhd_radio_up = 1; |
| |
/* Network interface name */
| char iface_name[IFNAMSIZ] = {'\0'}; |
| module_param_string(iface_name, iface_name, IFNAMSIZ, 0); |
| |
| /* The following are specific to the SDIO dongle */ |
| |
| /* IOCTL response timeout */ |
| int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; |
| |
| /* DS Exit response timeout */ |
| int ds_exit_timeout_msec = DS_EXIT_TIMEOUT; |
| |
| /* Idle timeout for backplane clock */ |
| int dhd_idletime = DHD_IDLETIME_TICKS; |
| module_param(dhd_idletime, int, 0); |
| |
| /* Use polling */ |
| uint dhd_poll = FALSE; |
| module_param(dhd_poll, uint, 0); |
| |
| /* Use interrupts */ |
| uint dhd_intr = TRUE; |
| module_param(dhd_intr, uint, 0); |
| |
| /* SDIO Drive Strength (in milliamps) */ |
| uint dhd_sdiod_drive_strength = 6; |
| module_param(dhd_sdiod_drive_strength, uint, 0); |
| |
| #ifdef BCMSDIO |
| /* Tx/Rx bounds */ |
| extern uint dhd_txbound; |
| extern uint dhd_rxbound; |
| module_param(dhd_txbound, uint, 0); |
| module_param(dhd_rxbound, uint, 0); |
| |
| /* Deferred transmits */ |
| extern uint dhd_deferred_tx; |
| module_param(dhd_deferred_tx, uint, 0); |
| |
| #endif /* BCMSDIO */ |
| |
| |
| #ifdef SDTEST |
| /* Echo packet generator (pkts/s) */ |
| uint dhd_pktgen = 0; |
| module_param(dhd_pktgen, uint, 0); |
| |
| /* Echo packet len (0 => sawtooth, max 2040) */ |
| uint dhd_pktgen_len = 0; |
| module_param(dhd_pktgen_len, uint, 0); |
| #endif /* SDTEST */ |
| |
| |
| |
| /* Allow delayed firmware download for debug purpose */ |
| int allow_delay_fwdl = FALSE; |
| module_param(allow_delay_fwdl, int, 0); |
| |
| extern char dhd_version[]; |
| extern char fw_version[]; |
| extern char clm_version[]; |
| |
| int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); |
| static void dhd_net_if_lock_local(dhd_info_t *dhd); |
| static void dhd_net_if_unlock_local(dhd_info_t *dhd); |
| static void dhd_suspend_lock(dhd_pub_t *dhdp); |
| static void dhd_suspend_unlock(dhd_pub_t *dhdp); |
| |
| #ifdef WLMEDIA_HTSF |
| void htsf_update(dhd_info_t *dhd, void *data); |
| tsf_t prev_tsf, cur_tsf; |
| |
| uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx); |
| static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx); |
| static void dhd_dump_latency(void); |
| static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf); |
| static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf); |
| static void dhd_dump_htsfhisto(histo_t *his, char *s); |
| #endif /* WLMEDIA_HTSF */ |
| |
| /* Monitor interface */ |
| int dhd_monitor_init(void *dhd_pub); |
| int dhd_monitor_uninit(void); |
| |
| |
| #if defined(WL_WIRELESS_EXT) |
| struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); |
| #endif /* defined(WL_WIRELESS_EXT) */ |
| |
| static void dhd_dpc(ulong data); |
| /* forward decl */ |
| extern int dhd_wait_pend8021x(struct net_device *dev); |
| void dhd_os_wd_timer_extend(void *bus, bool extend); |
| |
| #ifdef TOE |
| #ifndef BDC |
| #error TOE requires BDC |
| #endif /* !BDC */ |
| static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); |
| static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); |
| #endif /* TOE */ |
| |
| static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, |
| wl_event_msg_t *event_ptr, void **data_ptr); |
| |
| #if defined(CONFIG_PM_SLEEP) |
| static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) |
| { |
| int ret = NOTIFY_DONE; |
| bool suspend = FALSE; |
| |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| |
| BCM_REFERENCE(dhdinfo); |
| BCM_REFERENCE(suspend); |
| |
| switch (action) { |
| case PM_HIBERNATION_PREPARE: |
| case PM_SUSPEND_PREPARE: |
| suspend = TRUE; |
| break; |
| |
| case PM_POST_HIBERNATION: |
| case PM_POST_SUSPEND: |
| suspend = FALSE; |
| break; |
| } |
| |
| #if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) |
| if (suspend) { |
| DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); |
| dhd_wlfc_suspend(&dhdinfo->pub); |
| DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); |
| } else { |
| dhd_wlfc_resume(&dhdinfo->pub); |
| } |
| #endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */ |
| |
| #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ |
| KERNEL_VERSION(2, 6, 39)) |
| dhd_mmc_suspend = suspend; |
| smp_mb(); |
| #endif |
| |
| return ret; |
| } |
| |
/* Make sure we don't register the same notifier twice; doing so would likely
 * create a loop in the kernel notifier linked list (with 'next' pointing to itself).
 */
| static bool dhd_pm_notifier_registered = FALSE; |
| |
| extern int register_pm_notifier(struct notifier_block *nb); |
| extern int unregister_pm_notifier(struct notifier_block *nb); |
| #endif /* CONFIG_PM_SLEEP */ |
| |
| /* Request scheduling of the bus rx frame */ |
| static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb); |
| static void dhd_os_rxflock(dhd_pub_t *pub); |
| static void dhd_os_rxfunlock(dhd_pub_t *pub); |
| |
| /** priv_link is the link between netdev and the dhdif and dhd_info structs. */ |
| typedef struct dhd_dev_priv { |
| dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */ |
| dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */ |
| int ifidx; /* interface index */ |
| void * lkup; |
| } dhd_dev_priv_t; |
| |
| #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t)) |
| #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev)) |
| #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd) |
| #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp) |
| #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx) |
| #define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup) |
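
/*
 * Illustrative usage sketch (assuming 'net' is a dhd-managed net_device):
 *   dhd_info_t *dhd = DHD_DEV_INFO(net);
 *   dhd_if_t *ifp = DHD_DEV_IFP(net);
 *   int ifidx = DHD_DEV_IFIDX(net);
 */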
| |
| #if defined(DHD_OF_SUPPORT) |
| extern int dhd_wlan_init(void); |
| #endif /* defined(DHD_OF_SUPPORT) */ |
| /** Clear the dhd net_device's private structure. */ |
| static inline void |
| dhd_dev_priv_clear(struct net_device * dev) |
| { |
| dhd_dev_priv_t * dev_priv; |
| ASSERT(dev != (struct net_device *)NULL); |
| dev_priv = DHD_DEV_PRIV(dev); |
| dev_priv->dhd = (dhd_info_t *)NULL; |
| dev_priv->ifp = (dhd_if_t *)NULL; |
| dev_priv->ifidx = DHD_BAD_IF; |
| dev_priv->lkup = (void *)NULL; |
| } |
| |
| /** Setup the dhd net_device's private structure. */ |
| static inline void |
| dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp, |
| int ifidx) |
| { |
| dhd_dev_priv_t * dev_priv; |
| ASSERT(dev != (struct net_device *)NULL); |
| dev_priv = DHD_DEV_PRIV(dev); |
| dev_priv->dhd = dhd; |
| dev_priv->ifp = ifp; |
| dev_priv->ifidx = ifidx; |
| } |
| |
| #ifdef PCIE_FULL_DONGLE |
| |
/** Dummy objects are defined with state representing bad|down.
 * Using dummy objects instead of NULL pointers removes branch conditionals and
 * improves instruction parallelism and dual issue, reduces load shadows, and
 * makes better use of larger pipelines.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
 * is accessed via the dhd_sta_t.
 */
| |
| /* Dummy dhd_info object */ |
| dhd_info_t dhd_info_null = { |
| #if defined(BCM_GMAC3) |
| .fwdh = FWDER_NULL, |
| #endif |
| .pub = { |
| .info = &dhd_info_null, |
| #ifdef DHDTCPACK_SUPPRESS |
| .tcpack_sup_mode = TCPACK_SUP_REPLACE, |
| #endif /* DHDTCPACK_SUPPRESS */ |
| #if defined(TRAFFIC_MGMT_DWM) |
| .dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE }, |
| #endif |
| .up = FALSE, |
| .busstate = DHD_BUS_DOWN |
| } |
| }; |
| #define DHD_INFO_NULL (&dhd_info_null) |
| #define DHD_PUB_NULL (&dhd_info_null.pub) |
| |
| /* Dummy netdevice object */ |
| struct net_device dhd_net_dev_null = { |
| .reg_state = NETREG_UNREGISTERED |
| }; |
| #define DHD_NET_DEV_NULL (&dhd_net_dev_null) |
| |
| /* Dummy dhd_if object */ |
| dhd_if_t dhd_if_null = { |
| #if defined(BCM_GMAC3) |
| .fwdh = FWDER_NULL, |
| #endif |
| #ifdef WMF |
| .wmf = { .wmf_enable = TRUE }, |
| #endif |
| .info = DHD_INFO_NULL, |
| .net = DHD_NET_DEV_NULL, |
| .idx = DHD_BAD_IF |
| }; |
| #define DHD_IF_NULL (&dhd_if_null) |
| |
| #define DHD_STA_NULL ((dhd_sta_t *)NULL) |
| |
| /** Interface STA list management. */ |
| |
| /** Fetch the dhd_if object, given the interface index in the dhd. */ |
| static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx); |
| |
| /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */ |
| static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta); |
| static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp); |
| |
| /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */ |
| static void dhd_if_del_sta_list(dhd_if_t * ifp); |
| static void dhd_if_flush_sta(dhd_if_t * ifp); |
| |
| /* Construct/Destruct a sta pool. */ |
| static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta); |
| static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta); |
| /* Clear the pool of dhd_sta_t objects for built-in type driver */ |
| static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta); |
| |
| |
| /* Return interface pointer */ |
| static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx) |
| { |
| ASSERT(ifidx < DHD_MAX_IFS); |
| |
| if (ifidx >= DHD_MAX_IFS) |
| return NULL; |
| |
| return dhdp->info->iflist[ifidx]; |
| } |
| |
| /** Reset a dhd_sta object and free into the dhd pool. */ |
| static void |
| dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta) |
| { |
| int prio; |
| |
| ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); |
| |
| ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); |
| |
| /* |
| * Flush and free all packets in all flowring's queues belonging to sta. |
| * Packets in flow ring will be flushed later. |
| */ |
| for (prio = 0; prio < (int)NUMPRIO; prio++) { |
| uint16 flowid = sta->flowid[prio]; |
| |
| if (flowid != FLOWID_INVALID) { |
| unsigned long flags; |
| flow_queue_t * queue = dhd_flow_queue(dhdp, flowid); |
| flow_ring_node_t * flow_ring_node; |
| |
| #ifdef DHDTCPACK_SUPPRESS |
| /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, |
| * when there is a newly coming packet from network stack. |
| */ |
| dhd_tcpack_info_tbl_clean(dhdp); |
| #endif /* DHDTCPACK_SUPPRESS */ |
| |
| flow_ring_node = dhd_flow_ring_node(dhdp, flowid); |
| DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); |
| flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; |
| |
| if (!DHD_FLOW_QUEUE_EMPTY(queue)) { |
| void * pkt; |
| while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) { |
| PKTFREE(dhdp->osh, pkt, TRUE); |
| } |
| } |
| |
| DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); |
| ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); |
| } |
| |
| sta->flowid[prio] = FLOWID_INVALID; |
| } |
| |
| id16_map_free(dhdp->staid_allocator, sta->idx); |
| DHD_CUMM_CTR_INIT(&sta->cumm_ctr); |
| sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ |
| sta->ifidx = DHD_BAD_IF; |
| bzero(sta->ea.octet, ETHER_ADDR_LEN); |
| INIT_LIST_HEAD(&sta->list); |
| sta->idx = ID16_INVALID; /* implying free */ |
| } |
| |
| /** Allocate a dhd_sta object from the dhd pool. */ |
| static dhd_sta_t * |
| dhd_sta_alloc(dhd_pub_t * dhdp) |
| { |
| uint16 idx; |
| dhd_sta_t * sta; |
| dhd_sta_pool_t * sta_pool; |
| |
| ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); |
| |
| idx = id16_map_alloc(dhdp->staid_allocator); |
| if (idx == ID16_INVALID) { |
| DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__)); |
| return DHD_STA_NULL; |
| } |
| |
| sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool); |
| sta = &sta_pool[idx]; |
| |
| ASSERT((sta->idx == ID16_INVALID) && |
| (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); |
| |
| DHD_CUMM_CTR_INIT(&sta->cumm_ctr); |
| |
| sta->idx = idx; /* implying allocated */ |
| |
| return sta; |
| } |
| |
| /** Delete all STAs in an interface's STA list. */ |
| static void |
| dhd_if_del_sta_list(dhd_if_t *ifp) |
| { |
| dhd_sta_t *sta, *next; |
| unsigned long flags; |
| |
| DHD_IF_STA_LIST_LOCK(ifp, flags); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { |
| #if defined(BCM_GMAC3) |
| if (ifp->fwdh) { |
| /* Remove sta from WOFA forwarder. */ |
| fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (uintptr_t)sta); |
| } |
| #endif /* BCM_GMAC3 */ |
| list_del(&sta->list); |
| dhd_sta_free(&ifp->info->pub, sta); |
| } |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| DHD_IF_STA_LIST_UNLOCK(ifp, flags); |
| |
| return; |
| } |
| |
| /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */ |
| static void |
| dhd_if_flush_sta(dhd_if_t * ifp) |
| { |
| #if defined(BCM_GMAC3) |
| |
| if (ifp && (ifp->fwdh != FWDER_NULL)) { |
| dhd_sta_t *sta, *next; |
| unsigned long flags; |
| |
| DHD_IF_STA_LIST_LOCK(ifp, flags); |
| |
| list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { |
| /* Remove any sta entry from WOFA forwarder. */ |
| fwder_flush(ifp->fwdh, (uintptr_t)sta); |
| } |
| |
| DHD_IF_STA_LIST_UNLOCK(ifp, flags); |
| } |
| #endif /* BCM_GMAC3 */ |
| } |
| |
| /** Construct a pool of dhd_sta_t objects to be used by interfaces. */ |
| static int |
| dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) |
| { |
| int idx, prio, sta_pool_memsz; |
| dhd_sta_t * sta; |
| dhd_sta_pool_t * sta_pool; |
| void * staid_allocator; |
| |
| ASSERT(dhdp != (dhd_pub_t *)NULL); |
| ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL)); |
| |
| /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ |
| staid_allocator = id16_map_init(dhdp->osh, max_sta, 1); |
| if (staid_allocator == NULL) { |
| DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__)); |
| return BCME_ERROR; |
| } |
| |
| /* Pre allocate a pool of dhd_sta objects (one extra). */ |
| sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */ |
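	/* Entry 0 is unused so that sta id k indexes sta_pool[k] directly. */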
| sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz); |
| if (sta_pool == NULL) { |
| DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__)); |
| id16_map_fini(dhdp->osh, staid_allocator); |
| return BCME_ERROR; |
| } |
| |
| dhdp->sta_pool = sta_pool; |
| dhdp->staid_allocator = staid_allocator; |
| |
| /* Initialize all sta(s) for the pre-allocated free pool. */ |
| bzero((uchar *)sta_pool, sta_pool_memsz); |
| for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ |
| sta = &sta_pool[idx]; |
| sta->idx = id16_map_alloc(staid_allocator); |
| ASSERT(sta->idx <= max_sta); |
| } |
| /* Now place them into the pre-allocated free pool. */ |
| for (idx = 1; idx <= max_sta; idx++) { |
| sta = &sta_pool[idx]; |
| for (prio = 0; prio < (int)NUMPRIO; prio++) { |
| sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ |
| } |
| dhd_sta_free(dhdp, sta); |
| } |
| |
| return BCME_OK; |
| } |
| |
| /** Destruct the pool of dhd_sta_t objects. |
| * Caller must ensure that no STA objects are currently associated with an if. |
| */ |
| static void |
| dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) |
| { |
| dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; |
| |
| if (sta_pool) { |
| int idx; |
| int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); |
| for (idx = 1; idx <= max_sta; idx++) { |
| ASSERT(sta_pool[idx].ifp == DHD_IF_NULL); |
| ASSERT(sta_pool[idx].idx == ID16_INVALID); |
| } |
| MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz); |
| dhdp->sta_pool = NULL; |
| } |
| |
| id16_map_fini(dhdp->osh, dhdp->staid_allocator); |
| dhdp->staid_allocator = NULL; |
| } |
| |
| /* Clear the pool of dhd_sta_t objects for built-in type driver */ |
| static void |
| dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) |
| { |
| int idx, prio, sta_pool_memsz; |
| dhd_sta_t * sta; |
| dhd_sta_pool_t * sta_pool; |
| void *staid_allocator; |
| |
| if (!dhdp) { |
| DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); |
| return; |
| } |
| |
| sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; |
| staid_allocator = dhdp->staid_allocator; |
| |
| if (!sta_pool) { |
| DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__)); |
| return; |
| } |
| |
| if (!staid_allocator) { |
| DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__)); |
| return; |
| } |
| |
| /* clear free pool */ |
| sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); |
| bzero((uchar *)sta_pool, sta_pool_memsz); |
| |
| /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ |
| id16_map_clear(staid_allocator, max_sta, 1); |
| |
| /* Initialize all sta(s) for the pre-allocated free pool. */ |
| for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ |
| sta = &sta_pool[idx]; |
| sta->idx = id16_map_alloc(staid_allocator); |
| ASSERT(sta->idx <= max_sta); |
| } |
| /* Now place them into the pre-allocated free pool. */ |
| for (idx = 1; idx <= max_sta; idx++) { |
| sta = &sta_pool[idx]; |
| for (prio = 0; prio < (int)NUMPRIO; prio++) { |
| sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ |
| } |
| dhd_sta_free(dhdp, sta); |
| } |
| } |
| |
| /** Find STA with MAC address ea in an interface's STA list. */ |
| dhd_sta_t * |
| dhd_find_sta(void *pub, int ifidx, void *ea) |
| { |
| dhd_sta_t *sta; |
| dhd_if_t *ifp; |
| unsigned long flags; |
| |
| ASSERT(ea != NULL); |
| ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); |
| if (ifp == NULL) |
| return DHD_STA_NULL; |
| |
| DHD_IF_STA_LIST_LOCK(ifp, flags); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| list_for_each_entry(sta, &ifp->sta_list, list) { |
| if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { |
| DHD_INFO(("%s: found STA " MACDBG "\n", |
| __FUNCTION__, MAC2STRDBG((char *)ea))); |
| DHD_IF_STA_LIST_UNLOCK(ifp, flags); |
| return sta; |
| } |
| } |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| DHD_IF_STA_LIST_UNLOCK(ifp, flags); |
| |
| return DHD_STA_NULL; |
| } |
| |
| /** Add STA into the interface's STA list. */ |
| dhd_sta_t * |
| dhd_add_sta(void *pub, int ifidx, void *ea) |
| { |
| dhd_sta_t *sta; |
| dhd_if_t *ifp; |
| unsigned long flags; |
| |
| ASSERT(ea != NULL); |
| ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); |
| if (ifp == NULL) |
| return DHD_STA_NULL; |
| |
| sta = dhd_sta_alloc((dhd_pub_t *)pub); |
| if (sta == DHD_STA_NULL) { |
| DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__)); |
| return DHD_STA_NULL; |
| } |
| |
| memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN); |
| |
| /* link the sta and the dhd interface */ |
| sta->ifp = ifp; |
| sta->ifidx = ifidx; |
| #ifdef DHD_WMF |
| sta->psta_prim = NULL; |
| #endif |
| INIT_LIST_HEAD(&sta->list); |
| |
| DHD_IF_STA_LIST_LOCK(ifp, flags); |
| |
| list_add_tail(&sta->list, &ifp->sta_list); |
| |
| #if defined(BCM_GMAC3) |
| if (ifp->fwdh) { |
| ASSERT(ISALIGNED(ea, 2)); |
| /* Add sta to WOFA forwarder. */ |
| fwder_reassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta); |
| } |
| #endif /* BCM_GMAC3 */ |
| |
| DHD_IF_STA_LIST_UNLOCK(ifp, flags); |
| |
| return sta; |
| } |
| |
| /** Delete all STAs from the interface's STA list. */ |
| void |
| dhd_del_all_sta(void *pub, int ifidx) |
| { |
| dhd_sta_t *sta, *next; |
| dhd_if_t *ifp; |
| unsigned long flags; |
| |
| ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); |
| if (ifp == NULL) |
| return; |
| |
| DHD_IF_STA_LIST_LOCK(ifp, flags); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { |
| #if defined(BCM_GMAC3) |
| if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */ |
| ASSERT(ISALIGNED(sta->ea.octet, 2)); |
| fwder_deassoc(ifp->fwdh, (uint16 *)sta->ea.octet, (uintptr_t)sta); |
| } |
| #endif /* BCM_GMAC3 */ |
| |
| list_del(&sta->list); |
| dhd_sta_free(&ifp->info->pub, sta); |
| #ifdef DHD_L2_FILTER |
| if (ifp->parp_enable) { |
| /* clear Proxy ARP cache of specific Ethernet Address */ |
| bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, |
| ifp->phnd_arp_table, FALSE, |
| sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt); |
| } |
| #endif /* DHD_L2_FILTER */ |
| } |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| DHD_IF_STA_LIST_UNLOCK(ifp, flags); |
| |
| return; |
| } |
| |
| /** Delete STA from the interface's STA list. */ |
| void |
| dhd_del_sta(void *pub, int ifidx, void *ea) |
| { |
| dhd_sta_t *sta, *next; |
| dhd_if_t *ifp; |
| unsigned long flags; |
| char macstr[ETHER_ADDR_STR_LEN]; |
| |
| ASSERT(ea != NULL); |
| ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); |
| if (ifp == NULL) |
| return; |
| |
| DHD_IF_STA_LIST_LOCK(ifp, flags); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { |
| if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { |
| #if defined(BCM_GMAC3) |
| if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */ |
| ASSERT(ISALIGNED(ea, 2)); |
| fwder_deassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta); |
| } |
| #endif /* BCM_GMAC3 */ |
| DHD_MAC_TO_STR(((char *)ea), macstr); |
| DHD_ERROR(("%s: Deleting STA %s\n", __FUNCTION__, macstr)); |
| list_del(&sta->list); |
| dhd_sta_free(&ifp->info->pub, sta); |
| } |
| } |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| DHD_IF_STA_LIST_UNLOCK(ifp, flags); |
| #ifdef DHD_L2_FILTER |
| if (ifp->parp_enable) { |
| /* clear Proxy ARP cache of specific Ethernet Address */ |
| bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, |
| ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); |
| } |
| #endif /* DHD_L2_FILTER */ |
| return; |
| } |
| |
| /** Add STA if it doesn't exist. Not reentrant. */ |
| dhd_sta_t* |
| dhd_findadd_sta(void *pub, int ifidx, void *ea) |
| { |
| dhd_sta_t *sta; |
| |
| sta = dhd_find_sta(pub, ifidx, ea); |
| |
| if (!sta) { |
| /* Add entry */ |
| sta = dhd_add_sta(pub, ifidx, ea); |
| } |
| |
| return sta; |
| } |
| |
| #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) |
| #if !defined(BCM_GMAC3) |
| static struct list_head * |
| dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list) |
| { |
| unsigned long flags; |
| dhd_sta_t *sta, *snapshot; |
| |
| INIT_LIST_HEAD(snapshot_list); |
| |
| DHD_IF_STA_LIST_LOCK(ifp, flags); |
| |
| list_for_each_entry(sta, &ifp->sta_list, list) { |
| /* allocate one and add to snapshot */ |
| snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); |
| if (snapshot == NULL) { |
| DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__)); |
| continue; |
| } |
| |
| memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); |
| |
| INIT_LIST_HEAD(&snapshot->list); |
| list_add_tail(&snapshot->list, snapshot_list); |
| } |
| |
| DHD_IF_STA_LIST_UNLOCK(ifp, flags); |
| |
| return snapshot_list; |
| } |
| |
| static void |
| dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list) |
| { |
| dhd_sta_t *sta, *next; |
| |
| list_for_each_entry_safe(sta, next, snapshot_list, list) { |
| list_del(&sta->list); |
| MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); |
| } |
| } |
| #endif /* !BCM_GMAC3 */ |
| #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ |
| |
| #else |
| static inline void dhd_if_flush_sta(dhd_if_t * ifp) { } |
| static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} |
| static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } |
| static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} |
| static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} |
| dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } |
| dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; } |
| void dhd_del_sta(void *pub, int ifidx, void *ea) {} |
| #endif /* PCIE_FULL_DONGLE */ |
| |
| |
| |
| #if defined(DHD_LB) |
| |
| #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) |
| /** |
| * dhd_tasklet_schedule - Function that runs in IPI context of the destination |
| * CPU and schedules a tasklet. |
| * @tasklet: opaque pointer to the tasklet |
| */ |
| INLINE void |
| dhd_tasklet_schedule(void *tasklet) |
| { |
| tasklet_schedule((struct tasklet_struct *)tasklet); |
| } |
| /** |
| * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU |
| * @tasklet: tasklet to be scheduled |
| * @on_cpu: cpu core id |
| * |
| * If the requested cpu is online, then an IPI is sent to this cpu via the |
| * smp_call_function_single with no wait and the tasklet_schedule function |
| * will be invoked to schedule the specified tasklet on the requested CPU. |
| */ |
| INLINE void |
| dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu) |
| { |
| const int wait = 0; |
| smp_call_function_single(on_cpu, |
| dhd_tasklet_schedule, (void *)tasklet, wait); |
| } |
| |
| /** |
| * dhd_work_schedule_on - Executes the passed work in a given CPU |
| * @work: work to be scheduled |
| * @on_cpu: cpu core id |
| * |
| * If the requested cpu is online, then an IPI is sent to this cpu via the |
| * schedule_work_on and the work function |
| * will be invoked to schedule the specified work on the requested CPU. |
| */ |
| |
| INLINE void |
| dhd_work_schedule_on(struct work_struct *work, int on_cpu) |
| { |
| schedule_work_on(on_cpu, work); |
| } |
| #endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP */ |
| |
| #if defined(DHD_LB_TXC) |
| /** |
| * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet |
| * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and |
| * freeing the packets placed in the tx_compl workq |
| */ |
| void |
| dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp) |
| { |
| dhd_info_t *dhd = dhdp->info; |
| int curr_cpu, on_cpu; |
| |
| if (dhd->rx_napi_netdev == NULL) { |
| DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); |
| return; |
| } |
| |
| DHD_LB_STATS_INCR(dhd->txc_sched_cnt); |
| /* |
| * If the destination CPU is NOT online or is same as current CPU |
| * no need to schedule the work |
| */ |
| curr_cpu = get_cpu(); |
| put_cpu(); |
| |
| on_cpu = atomic_read(&dhd->tx_compl_cpu); |
| |
| if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { |
| dhd_tasklet_schedule(&dhd->tx_compl_tasklet); |
| } else { |
| schedule_work(&dhd->tx_compl_dispatcher_work); |
| } |
| } |
| |
| static void dhd_tx_compl_dispatcher_fn(struct work_struct * work) |
| { |
| struct dhd_info *dhd = |
| container_of(work, struct dhd_info, tx_compl_dispatcher_work); |
| int cpu; |
| |
| get_online_cpus(); |
| cpu = atomic_read(&dhd->tx_compl_cpu); |
| if (!cpu_online(cpu)) |
| dhd_tasklet_schedule(&dhd->tx_compl_tasklet); |
| else |
| dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu); |
| put_online_cpus(); |
| } |
| #endif /* DHD_LB_TXC */ |
| |
| #if defined(DHD_LB_RXC) |
| /** |
| * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet |
| * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers |
| * in the H2D RxBuffer Post common ring, by using the recycled pktids that were |
| * placed in the rx_compl workq. |
| * |
| * @dhdp: pointer to dhd_pub object |
| */ |
| void |
| dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp) |
| { |
| dhd_info_t *dhd = dhdp->info; |
| int curr_cpu, on_cpu; |
| |
| if (dhd->rx_napi_netdev == NULL) { |
| DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); |
| return; |
| } |
| |
| DHD_LB_STATS_INCR(dhd->rxc_sched_cnt); |
| /* |
| * If the destination CPU is NOT online or is same as current CPU |
| * no need to schedule the work |
| */ |
| curr_cpu = get_cpu(); |
| put_cpu(); |
| on_cpu = atomic_read(&dhd->rx_compl_cpu); |
| |
| if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { |
| dhd_tasklet_schedule(&dhd->rx_compl_tasklet); |
| } else { |
| dhd_rx_compl_dispatcher_fn(dhdp); |
| } |
| } |
| |
| static void dhd_rx_compl_dispatcher_fn(dhd_pub_t *dhdp) |
| { |
| struct dhd_info *dhd = dhdp->info; |
| int cpu; |
| |
| preempt_disable(); |
| cpu = atomic_read(&dhd->rx_compl_cpu); |
| if (!cpu_online(cpu)) |
| dhd_tasklet_schedule(&dhd->rx_compl_tasklet); |
| else { |
| dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu); |
| } |
| preempt_enable(); |
| } |
| #endif /* DHD_LB_RXC */ |
| |
| #if defined(DHD_LB_TXP) |
| static void dhd_tx_dispatcher_work(struct work_struct * work) |
| { |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| struct dhd_info *dhd = |
| container_of(work, struct dhd_info, tx_dispatcher_work); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| dhd_tasklet_schedule(&dhd->tx_tasklet); |
| } |
| |
| static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp) |
| { |
| int cpu; |
| int net_tx_cpu; |
| dhd_info_t *dhd = dhdp->info; |
| |
| preempt_disable(); |
| cpu = atomic_read(&dhd->tx_cpu); |
| net_tx_cpu = atomic_read(&dhd->net_tx_cpu); |
| |
| /* |
| * Now if the NET_TX has pushed the packet in the same |
| * CPU that is chosen for Tx processing, seperate it out |
| * i.e run the TX processing tasklet in compl_cpu |
| */ |
| if (net_tx_cpu == cpu) |
| cpu = atomic_read(&dhd->tx_compl_cpu); |
| |
| if (!cpu_online(cpu)) { |
| /* |
| * Ooohh... but the Chosen CPU is not online, |
| * Do the job in the current CPU itself. |
| */ |
| dhd_tasklet_schedule(&dhd->tx_tasklet); |
| } else { |
| /* |
| * Schedule tx_dispatcher_work to on the cpu which |
| * in turn will schedule tx_tasklet. |
| */ |
| dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu); |
| } |
| preempt_enable(); |
| } |
| |
| /** |
| * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet |
| * on another cpu. The tx_tasklet will take care of actually putting |
| * the skbs into appropriate flow ring and ringing H2D interrupt |
| * |
| * @dhdp: pointer to dhd_pub object |
| */ |
| static void |
| dhd_lb_tx_dispatch(dhd_pub_t *dhdp) |
| { |
| dhd_info_t *dhd = dhdp->info; |
| int curr_cpu; |
| |
| curr_cpu = get_cpu(); |
| put_cpu(); |
| |
| /* Record the CPU in which the TX request from Network stack came */ |
| atomic_set(&dhd->net_tx_cpu, curr_cpu); |
| |
	/* Dispatch TX processing; this may schedule work on another CPU */
| dhd_tx_dispatcher_fn(dhdp); |
| |
| } |
| #endif /* DHD_LB_TXP */ |
| |
| #if defined(DHD_LB_RXP) |
| /** |
| * dhd_napi_poll - Load balance napi poll function to process received |
| * packets and send up the network stack using netif_receive_skb() |
| * |
| * @napi: napi object in which context this poll function is invoked |
| * @budget: number of packets to be processed. |
| * |
| * Fetch the dhd_info given the rx_napi_struct. Move all packets from the |
| * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock). |
| * Dequeue each packet from head of rx_process_queue, fetch the ifid from the |
| * packet tag and sendup. |
| */ |
| static int |
| dhd_napi_poll(struct napi_struct *napi, int budget) |
| { |
| int ifid; |
| const int pkt_count = 1; |
| const int chan = 0; |
| struct sk_buff * skb; |
| unsigned long flags; |
| struct dhd_info *dhd; |
| int processed = 0; |
| struct sk_buff_head rx_process_queue; |
| |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| dhd = container_of(napi, struct dhd_info, rx_napi_struct); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| |
| DHD_INFO(("%s napi_queue<%d> budget<%d>\n", |
| __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget)); |
| __skb_queue_head_init(&rx_process_queue); |
| |
| /* extract the entire rx_napi_queue into local rx_process_queue */ |
| spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); |
| skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue); |
| spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); |
| |
| while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) { |
| OSL_PREFETCH(skb->data); |
| |
| ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); |
| |
| DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n", |
| __FUNCTION__, skb, ifid)); |
| |
| dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan); |
| processed++; |
| } |
| |
| DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed); |
| |
| DHD_INFO(("%s processed %d\n", __FUNCTION__, processed)); |
| napi_complete(napi); |
| |
| return budget - 1; |
| } |
| |
| /** |
 * dhd_napi_schedule - Place the napi struct into the current cpu's softnet napi
| * poll list. This function may be invoked via the smp_call_function_single |
| * from a remote CPU. |
| * |
| * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ) |
| * after the napi_struct is added to the softnet data's poll_list |
| * |
| * @info: pointer to a dhd_info struct |
| */ |
| static void |
| dhd_napi_schedule(void *info) |
| { |
| dhd_info_t *dhd = (dhd_info_t *)info; |
| |
| DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n", |
| __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu))); |
| |
| /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */ |
| if (napi_schedule_prep(&dhd->rx_napi_struct)) { |
| __napi_schedule(&dhd->rx_napi_struct); |
| DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt); |
| } |
| |
| /* |
| * If the rx_napi_struct was already running, then we let it complete |
| * processing all its packets. The rx_napi_struct may only run on one |
| * core at a time, to avoid out-of-order handling. |
| */ |
| } |
| |
| /** |
| * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ |
 * action after placing the dhd's rx_process napi object in the remote CPU's
| * softnet data's poll_list. |
| * |
| * @dhd: dhd_info which has the rx_process napi object |
| * @on_cpu: desired remote CPU id |
| */ |
| static INLINE int |
| dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu) |
| { |
| int wait = 0; /* asynchronous IPI */ |
| DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n", |
| __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu)); |
| |
| if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) { |
| DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n", |
| __FUNCTION__, on_cpu)); |
| } |
| |
| DHD_LB_STATS_INCR(dhd->napi_sched_cnt); |
| |
| return 0; |
| } |
| |
| /* |
| * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on |
| * Why should we do this? |
| * The candidacy algorithm is run from the call back function |
| * registered to CPU hotplug notifier. This call back happens from Worker |
| * context. The dhd_napi_schedule_on is also from worker context. |
| * Note that both of this can run on two different CPUs at the same time. |
| * So we can possibly have a window where a given CPUn is being brought |
| * down from CPUm while we try to run a function on CPUn. |
| * To prevent this its better have the whole code to execute an SMP |
| * function under get_online_cpus. |
| * This function call ensures that hotplug mechanism does not kick-in |
| * until we are done dealing with online CPUs |
| * If the hotplug worker is already running, no worries because the |
| * candidacy algo would then reflect the same in dhd->rx_napi_cpu. |
| * |
| * The below mentioned code structure is proposed in |
| * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt |
| * for the question |
| * Q: I need to ensure that a particular cpu is not removed when there is some |
| * work specific to this cpu is in progress |
| * |
| * According to the documentation calling get_online_cpus is NOT required, if |
| * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can |
| * run from Work Queue context we have to call these functions |
| */ |
| static void dhd_rx_napi_dispatcher_fn(struct work_struct * work) |
| { |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wcast-qual" |
| #endif |
| struct dhd_info *dhd = |
| container_of(work, struct dhd_info, rx_napi_dispatcher_work); |
| #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
| #pragma GCC diagnostic pop |
| #endif |
| int cpu; |
| |
| get_online_cpus(); |
| cpu = atomic_read(&dhd->rx_napi_cpu); |
| |
| if (!cpu_online(cpu)) |
| dhd_napi_schedule(dhd); |
| else |
| dhd_napi_schedule_on(dhd, cpu); |
| |
| put_online_cpus(); |
| } |
| |
| /** |
| * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct |
| * to run on another CPU. The rx_napi_struct's poll function will retrieve all |
| * the packets enqueued into the rx_napi_queue and sendup. |
| * The producer's rx packet queue is appended to the rx_napi_queue before |
| * dispatching the rx_napi_struct. |
| */ |
| void |
| dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp) |
| { |
| unsigned long flags; |
| dhd_info_t *dhd = dhdp->info; |
| int curr_cpu; |
| int on_cpu; |
| |
| if (dhd->rx_napi_netdev == NULL) { |
| DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); |
| return; |
| } |
| |
| DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__, |
| skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue))); |
| |
| /* append the producer's queue of packets to the napi's rx process queue */ |
| spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); |
| skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue); |
| spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); |
| |
| /* |
| * If the destination CPU is NOT online or is same as current CPU |
| * no need to schedule the work |
| */ |
| curr_cpu = get_cpu(); |
| put_cpu(); |
| |
| on_cpu = atomic_read(&dhd->rx_napi_cpu); |
| if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { |
| dhd_napi_schedule(dhd); |
| } else { |
| schedule_work(&dhd->rx_napi_dispatcher_work); |
| } |
| } |
| |
| /** |
| * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue |
| */ |
| void |
| dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx) |
| { |
| dhd_info_t *dhd = dhdp->info; |
| |
| DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__, |
| pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue))); |
| DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx); |
| __skb_queue_tail(&dhd->rx_pend_queue, pkt); |
| } |
| #endif /* DHD_LB_RXP */ |
| |
| #endif /* DHD_LB */ |
| |
| |
/** Returns the dhd iflist index corresponding to the bssidx provided by apps (or DHD_MAX_IFS if none matches) */
| int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) |
| { |
| dhd_if_t *ifp; |
| dhd_info_t *dhd = dhdp->info; |
| int i; |
| |
| ASSERT(bssidx < DHD_MAX_IFS); |
| ASSERT(dhdp); |
| |
| for (i = 0; i < DHD_MAX_IFS; i++) { |
| ifp = dhd->iflist[i]; |
| if (ifp && (ifp->bssidx == bssidx)) { |
| DHD_TRACE(("Index manipulated for %s from %d to %d\n", |
| ifp->name, bssidx, i)); |
| break; |
| } |
| } |
| return i; |
| } |
| |
| static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb) |
| { |
| uint32 store_idx; |
| uint32 sent_idx; |
| |
| if (!skb) { |
| DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n")); |
| return BCME_ERROR; |
| } |
| |
| dhd_os_rxflock(dhdp); |
| store_idx = dhdp->store_idx; |
| sent_idx = dhdp->sent_idx; |
| if (dhdp->skbbuf[store_idx] != NULL) { |
| /* Make sure the previous packets are processed */ |
| dhd_os_rxfunlock(dhdp); |
| #ifdef RXF_DEQUEUE_ON_BUSY |
| DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", |
| skb, store_idx, sent_idx)); |
| return BCME_BUSY; |
| #else /* RXF_DEQUEUE_ON_BUSY */ |
| DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", |
| skb, store_idx, sent_idx)); |
| /* removed msleep here, should use wait_event_timeout if we |
| * want to give rx frame thread a chance to run |
| */ |
| #if defined(WAIT_DEQUEUE) |
| OSL_SLEEP(1); |
| #endif |
| return BCME_ERROR; |
| #endif /* RXF_DEQUEUE_ON_BUSY */ |
| } |
| DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n", |
| skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1))); |
| dhdp->skbbuf[store_idx] = skb; |
| dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1); |
| dhd_os_rxfunlock(dhdp); |
| |
| return BCME_OK; |
| } |
| |
| static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp) |
| { |
| uint32 store_idx; |
|