wifi: update ap6356 driver to bcmdhd.101.10.361.x [1/1]
PD#SWPL-81801
BUG=232494797
Problem:
update ap6356 driver to bcmdhd.101.10.361.x
Solution:
update ap6356 driver to bcmdhd.101.10.361.x
Verify:
adt3
Signed-off-by: libo <bo.li@amlogic.com>
Change-Id: I61f0851c85fc9bd8ae5802d2811cfb0a5d4bce05
diff --git a/bcmdhd.101.10.361.x/Kconfig b/bcmdhd.101.10.361.x/Kconfig
new file mode 100755
index 0000000..f49ae76
--- /dev/null
+++ b/bcmdhd.101.10.361.x/Kconfig
@@ -0,0 +1,61 @@
+config BCMDHD
+ tristate "Broadcom FullMAC wireless cards support"
+ ---help---
+ This module adds support for wireless adapters based on
+ Broadcom FullMAC chipset.
+
+config BCMDHD_FW_PATH
+ depends on BCMDHD
+ string "Firmware path"
+ default "/system/etc/firmware/fw_bcmdhd.bin"
+ ---help---
+ Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+ depends on BCMDHD
+ string "NVRAM path"
+ default "/system/etc/firmware/nvram.txt"
+ ---help---
+ Path to the calibration file.
+
+config BCMDHD_WEXT
+ bool "Enable WEXT support"
+ depends on BCMDHD && CFG80211 = n
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ help
+ Enables WEXT support
+
+choice
+ prompt "Enable Chip Interface"
+ depends on BCMDHD
+ ---help---
+ Enable Chip Interface.
+config BCMDHD_SDIO
+ bool "SDIO bus interface support"
+ depends on BCMDHD && MMC
+config BCMDHD_PCIE
+ bool "PCIe bus interface support"
+ depends on BCMDHD && PCI
+config BCMDHD_USB
+ bool "USB bus interface support"
+ depends on BCMDHD && USB
+endchoice
+
+choice
+ depends on BCMDHD && BCMDHD_SDIO
+ prompt "Interrupt type"
+ ---help---
+ Interrupt type
+config BCMDHD_OOB
+ depends on BCMDHD && BCMDHD_SDIO
+ bool "Out-of-Band Interrupt"
+ default y
+ ---help---
+ Interrupt from WL_HOST_WAKE.
+config BCMDHD_SDIO_IRQ
+ depends on BCMDHD && BCMDHD_SDIO
+ bool "In-Band Interrupt"
+ ---help---
+ Interrupt from SDIO DAT[1]
+endchoice
diff --git a/bcmdhd.101.10.361.x/Makefile b/bcmdhd.101.10.361.x/Makefile
new file mode 100755
index 0000000..faf1fa5
--- /dev/null
+++ b/bcmdhd.101.10.361.x/Makefile
@@ -0,0 +1,391 @@
+# bcmdhd
+
+# if PCIe mode is not configured, use SDIO mode as the default
+ifeq ($(CONFIG_BCMDHD_PCIE),)
+$(info bcm SDIO driver configured)
+CONFIG_DHD_USE_STATIC_BUF := y
+endif
+
+ifeq ($(CONFIG_BCMDHD_SDIO),y)
+MODULE_NAME := dhd
+else
+ifeq ($(CONFIG_BCMDHD_USB),y)
+MODULE_NAME := bcmdhd
+else
+MODULE_NAME := dhdpci
+endif
+endif
+
+CONFIG_BCMDHD_ANDROID_VERSION := 13
+
+CONFIG_BCMDHD ?= m
+
+#CONFIG_BCMDHD_SDIO := y
+#CONFIG_BCMDHD_PCIE := y
+#CONFIG_BCMDHD_USB := y
+
+CONFIG_BCMDHD_OOB := y
+#CONFIG_BCMDHD_CUSB := y
+#CONFIG_BCMDHD_NO_POWER_OFF := y
+CONFIG_BCMDHD_PROPTXSTATUS := y
+CONFIG_DHD_USE_STATIC_BUF := y
+#CONFIG_BCMDHD_STATIC_BUF_IN_DHD := y
+CONFIG_BCMDHD_ANDROID_VERSION := 13
+CONFIG_BCMDHD_AUTO_SELECT := y
+CONFIG_BCMDHD_DEBUG := y
+#CONFIG_BCMDHD_TIMESTAMP := y
+#CONFIG_BCMDHD_WAPI := y
+#CONFIG_BCMDHD_RANDOM_MAC := y
+#CONFIG_BCMDHD_MULTIPLE_DRIVER := y
+CONFIG_BCMDHD_TPUT := y
+
+CONFIG_MACH_PLATFORM := y
+#CONFIG_BCMDHD_DTS := y
+
+ifndef CONFIG_KASAN
+ KBUILD_CFLAGS_MODULE += -Wframe-larger-than=3000
+endif
+
+DHDCFLAGS = -Wall -Wstrict-prototypes -Wno-date-time \
+ -Dlinux -DLINUX -DBCMDRIVER \
+ -Wno-unknown-warning-option \
+ -Wno-maybe-uninitialized -Wno-error -Wno-format-security \
+ -Wno-implicit-fallthrough \
+ -DBCMDONGLEHOST -DBCMDMA32 -DBCMFILEIMAGE \
+ -DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DGET_OTP_MAC_ENABLE \
+ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \
+ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DDHDTCPACK_SUPPRESS \
+ -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DOEM_ANDROID \
+ -DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP -DDHD_8021X_DUMP \
+ -DPOWERUP_MAX_RETRY=1 -DIFACE_HANG_FORCE_DEV_CLOSE -DWAIT_DEQUEUE \
+ -DUSE_NEW_RSPEC_DEFS \
+ -DWL_EXT_IAPSTA -DWL_ESCAN -DCCODE_LIST -DSUSPEND_EVENT \
+ -DEAPOL_RESEND -DEAPOL_DYNAMATIC_RESEND \
+ -DENABLE_INSMOD_NO_FW_LOAD
+
+DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \
+ dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \
+ dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \
+ bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \
+ dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o dhd_dbg_ring.o \
+ hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o frag.o \
+	dhd_linux_exportfs.o dhd_linux_pktdump.o \
+ dhd_config.o dhd_ccode.o wl_event.o wl_android_ext.o \
+ wl_iapsta.o wl_escan.o
+
+ifneq ($(CONFIG_WIRELESS_EXT),)
+ DHDOFILES += wl_iw.o
+ DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
+endif
+ifneq ($(CONFIG_CFG80211),)
+ DHDOFILES += wl_cfg80211.o wl_cfgscan.o wl_cfgp2p.o
+ DHDOFILES += wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o
+ DHDOFILES += dhd_cfg80211.o wl_cfgvif.o
+ DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT
+ DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+ DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10
+ DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL
+ DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES
+ DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT
+ DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8
+ DHDCFLAGS += -DWL_VIRTUAL_APSTA
+ DHDCFLAGS += -DPNO_SUPPORT -DEXPLICIT_DISCIF_CLEANUP
+ DHDCFLAGS += -DDHD_USE_SCAN_WAKELOCK
+ DHDCFLAGS += -DSPECIFIC_MAC_GEN_SCHEME
+ DHDCFLAGS += -DWL_IFACE_MGMT
+ DHDCFLAGS += -DWLFBT
+ DHDCFLAGS += -DWL_EXT_RECONNECT
+ DHDCFLAGS += -DDHD_LOSSLESS_ROAMING
+ DHDCFLAGS += -DGTK_OFFLOAD_SUPPORT
+ DHDCFLAGS += -DWL_STATIC_IF
+ DHDCFLAGS += -DWL_CLIENT_SAE -DWL_OWE
+endif
+
+#BCMDHD_SDIO
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+BUS_TYPE := "sdio"
+DHDCFLAGS += -DBCMSDIO -DMMC_SDIO_ABORT -DMMC_SW_RESET -DBCMLXSDMMC \
+ -DUSE_SDIOFIFO_IOVAR -DSDTEST \
+ -DBDC -DDHD_USE_IDLECOUNT -DCUSTOM_SDIO_F2_BLKSIZE=256 \
+ -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD \
+ -DDHDENABLE_TAILPAD -DSUPPORT_P2P_GO_PS \
+ -DBCMSDIO_RXLIM_POST -DBCMSDIO_TXSEQ_SYNC -DCONSOLE_DPC \
+ -DBCMSDIO_INTSTATUS_WAR
+ifeq ($(CONFIG_BCMDHD_OOB),y)
+ DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB
+ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y)
+ DHDCFLAGS += -DDISABLE_WOWLAN
+endif
+else
+ DHDCFLAGS += -DSDIO_ISR_THREAD
+endif
+DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \
+ dhd_sdio.o dhd_cdc.o dhd_wlfc.o
+endif
+
+#BCMDHD_PCIE
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+BUS_TYPE := "pcie"
+DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 \
+ -DDONGLE_ENABLE_ISOLATION
+DHDCFLAGS += -DDHD_LB -DDHD_LB_RXP -DDHD_LB_STATS -DDHD_LB_TXP
+DHDCFLAGS += -DDHD_PKTID_AUDIT_ENABLED
+DHDCFLAGS += -DINSMOD_FW_LOAD
+DHDCFLAGS += -DCONFIG_HAS_WAKELOCK
+#DHDCFLAGS += -DDHD_PCIE_RUNTIMEPM -DMAX_IDLE_COUNT=11 -DCUSTOM_DHD_RUNTIME_MS=100
+ifeq ($(CONFIG_BCMDHD_OOB),y)
+ DHDCFLAGS += -DCUSTOMER_OOB -DBCMPCIE_OOB_HOST_WAKE
+endif
+ifneq ($(CONFIG_PCI_MSI),)
+ DHDCFLAGS += -DDHD_MSI_SUPPORT
+endif
+CONFIG_BCMDHD_NO_POWER_OFF := y
+DHDCFLAGS += -DDHD_DISABLE_ASPM
+#DHDCFLAGS += -DUSE_AML_PCIE_TEE_MEM
+DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \
+ dhd_msgbuf.o dhd_linux_lb.o
+endif
+
+#BCMDHD_USB
+ifneq ($(CONFIG_BCMDHD_USB),)
+BUS_TYPE := "usb"
+DHDCFLAGS += -DUSBOS_TX_THREAD -DBCMDBUS -DBCMTRXV2 -DDBUS_USB_LOOPBACK \
+ -DBDC
+DHDCFLAGS += -DBCM_REQUEST_FW -DEXTERNAL_FW_PATH
+CONFIG_BCMDHD_NO_POWER_OFF := y
+ifneq ($(CONFIG_BCMDHD_CUSB),)
+ DHDCFLAGS += -DBCMUSBDEV_COMPOSITE
+ CONFIG_BCMDHD_NO_POWER_OFF := y
+endif
+DHDOFILES += dbus.o dbus_usb.o dbus_usb_linux.o dhd_cdc.o dhd_wlfc.o
+endif
+
+ifeq ($(CONFIG_BCMDHD_NO_POWER_OFF),y)
+ DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD
+ DHDCFLAGS += -DENABLE_INSMOD_NO_POWER_OFF -DNO_POWER_OFF_AFTER_OPEN
+endif
+
+ifeq ($(CONFIG_BCMDHD_MULTIPLE_DRIVER),y)
+ DHDCFLAGS += -DBCMDHD_MDRIVER
+ DHDCFLAGS += -DBUS_TYPE=\"-$(BUS_TYPE)\"
+ DHDCFLAGS += -DDHD_LOG_PREFIX=\"[dhd-$(BUS_TYPE)]\"
+ MODULE_NAME := dhd$(BUS_TYPE)
+else
+ DHDCFLAGS += -DBUS_TYPE=\"\"
+endif
+
+ifeq ($(CONFIG_BCMDHD_TIMESTAMP),y)
+ DHDCFLAGS += -DKERNEL_TIMESTAMP
+ DHDCFLAGS += -DSYSTEM_TIMESTAMP
+endif
+
+#PROPTXSTATUS
+ifeq ($(CONFIG_BCMDHD_PROPTXSTATUS),y)
+ifneq ($(CONFIG_BCMDHD_USB),)
+ DHDCFLAGS += -DPROP_TXSTATUS
+endif
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DPROP_TXSTATUS -DPROPTX_MAXCOUNT
+endif
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+endif
+endif
+
+ifeq ($(CONFIG_64BIT),y)
+ DHDCFLAGS := $(filter-out -DBCMDMA32,$(DHDCFLAGS))
+ DHDCFLAGS += -DBCMDMA64OSL
+endif
+
+# For Android VTS
+ifneq ($(CONFIG_BCMDHD_ANDROID_VERSION),)
+ DHDCFLAGS += -DANDROID_VERSION=$(CONFIG_BCMDHD_ANDROID_VERSION)
+ DHDCFLAGS += -DDHD_NOTIFY_MAC_CHANGED
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DLINKSTAT_SUPPORT
+ DHDCFLAGS += -DCUSTOM_COUNTRY_CODE -DDHD_GET_VALID_CHANNELS
+ DHDCFLAGS += -DDEBUGABILITY -DDBG_PKT_MON
+ DHDCFLAGS += -DDHD_LOG_DUMP -DDHD_FW_COREDUMP
+ DHDCFLAGS += -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT
+ DHDCFLAGS += -DDHD_WAKE_STATUS
+ DHDOFILES += dhd_rtt.o bcm_app_utils.o
+endif
+else
+ DHDCFLAGS += -DANDROID_VERSION=0
+endif
+
+# For Debug
+ifeq ($(CONFIG_BCMDHD_DEBUG),y)
+ DHDCFLAGS += -DDHD_ARP_DUMP -DDHD_DHCP_DUMP -DDHD_ICMP_DUMP
+ DHDCFLAGS += -DDHD_DNS_DUMP -DDHD_TRX_DUMP
+ DHDCFLAGS += -DTPUT_MONITOR
+# DHDCFLAGS += -DSCAN_SUPPRESS -DBSSCACHE
+ DHDCFLAGS += -DCHECK_DOWNLOAD_FW
+ DHDCFLAGS += -DPKT_STATICS
+ DHDCFLAGS += -DKSO_DEBUG
+# DHDCFLAGS += -DDHD_PKTDUMP_TOFW
+endif
+
+# For Debug2
+ifeq ($(CONFIG_BCMDHD_DEBUG2),y)
+ DHDCFLAGS += -DDEBUGFS_CFG80211
+ DHDCFLAGS += -DSHOW_LOGTRACE -DDHD_LOG_DUMP -DDHD_FW_COREDUMP
+ DHDCFLAGS += -DBCMASSERT_LOG -DSI_ERROR_ENFORCE
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DEWP_EDL
+ DHDCFLAGS += -DDNGL_EVENT_SUPPORT
+ DHDCFLAGS += -DDHD_SSSR_DUMP
+endif
+endif
+
+# MESH support for kernel 3.10 and later
+ifeq ($(CONFIG_WL_MESH),y)
+ DHDCFLAGS += -DWLMESH
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DWLMESH_CFG80211
+endif
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DBCM_HOST_BUF -DDMA_HOST_BUFFER_LEN=0x80000
+endif
+ DHDCFLAGS += -DDHD_UPDATE_INTF_MAC
+ DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
+ DHDCFLAGS :=$(filter-out -DWL_STATIC_IF,$(DHDCFLAGS))
+endif
+
+ifeq ($(CONFIG_BCMDHD_EASYMESH),y)
+ DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
+ DHDCFLAGS :=$(filter-out -DDHD_LOG_DUMP,$(DHDCFLAGS))
+ DHDCFLAGS += -DWLEASYMESH -DWL_STATIC_IF -DWLDWDS -DFOURADDR_AUTO_BRG
+endif
+
+#CSI_SUPPORT
+ifeq ($(CONFIG_CSI_SUPPORT),y)
+ DHDCFLAGS += -DCSI_SUPPORT
+ DHDOFILES += dhd_csi.o
+endif
+
+# For TPUT_IMPROVE
+ifeq ($(CONFIG_BCMDHD_TPUT),y)
+ DHDCFLAGS += -DDHD_TPUT_PATCH
+ DHDCFLAGS += -DTCPACK_INFO_MAXNUM=10 -DTCPDATA_INFO_MAXNUM=10
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DDYNAMIC_MAX_HDR_READ
+ DHDCFLAGS :=$(filter-out -DSDTEST,$(DHDCFLAGS))
+endif
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DDHD_LB_TXP_DEFAULT_ENAB
+ DHDCFLAGS += -DSET_RPS_CPUS -DSET_XPS_CPUS
+ DHDCFLAGS += -DDHD_LB_PRIMARY_CPUS=0xF0 -DDHD_LB_SECONDARY_CPUS=0x0E
+endif
+endif
+
+# For Zero configure
+ifeq ($(CONFIG_BCMDHD_ZEROCONFIG),y)
+ DHDCFLAGS += -DWL_EXT_GENL -DSENDPROB
+ DHDOFILES += wl_ext_genl.o
+endif
+
+# For WAPI
+ifeq ($(CONFIG_BCMDHD_WAPI),y)
+ DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI
+endif
+
+# For scan random mac
+ifneq ($(CONFIG_BCMDHD_RANDOM_MAC),)
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DSUPPORT_RANDOM_MAC_SCAN -DWL_USE_RANDOMIZED_SCAN
+endif
+endif
+
+# For NAN
+ifneq ($(CONFIG_BCMDHD_NAN),)
+ DHDCFLAGS += -DWL_NAN -DWL_NAN_DISC_CACHE
+ DHDOFILES += wl_cfgnan.o bcmbloom.o
+endif
+
+# For Module auto-selection
+ifeq ($(CONFIG_BCMDHD_AUTO_SELECT),y)
+ DHDCFLAGS += -DUPDATE_MODULE_NAME
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DGET_OTP_MODULE_NAME -DCOMPAT_OLD_MODULE
+endif
+endif
+
+ifeq ($(CONFIG_BCMDHD),m)
+ DHDCFLAGS += -DBCMDHD_MODULAR
+endif
+
+ifeq ($(CONFIG_MACH_PLATFORM),y)
+ DHDOFILES += dhd_gpio.o
+ifeq ($(CONFIG_BCMDHD_DTS),y)
+ DHDCFLAGS += -DBCMDHD_DTS
+endif
+ DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT
+ DHDCFLAGS += -DCUSTOMER_HW_AMLOGIC
+
+# for config custom MAC
+# DHDCFLAGS += -DGET_CUSTOM_MAC_ENABLE -DCUSTOM_MULTI_MAC
+# if the AP MAC also needs to be configured
+# DHDCFLAGS += -DCUSTOM_AP_MAC
+#
+endif
+
+ifeq ($(CONFIG_BCMDHD_AG),y)
+ DHDCFLAGS += -DBAND_AG
+endif
+
+ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
+ifeq ($(CONFIG_BCMDHD_STATIC_BUF_IN_DHD),y)
+ DHDOFILES += dhd_static_buf.o
+ DHDCFLAGS += -DDHD_STATIC_IN_DRIVER
+else
+# obj-m += dhd_static_buf.o
+endif
+ DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF
+ DHDCFLAGS += -DCONFIG_DHD_USE_STATIC_BUF
+ DHDCFLAGS += -DDHD_USE_STATIC_MEMDUMP
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DDHD_USE_STATIC_CTRLBUF
+endif
+endif
+
+ARCH ?= arm64
+CROSS_COMPILE ?=aarch64-linux-gnu-
+KDIR ?=../../../../../../common
+
+BCMDHD_ROOT = $(src)
+#$(warning "BCMDHD_ROOT=$(BCMDHD_ROOT)")
+EXTRA_CFLAGS = $(DHDCFLAGS)
+EXTRA_CFLAGS += -DDHD_COMPILED=\"$(BCMDHD_ROOT)\"
+EXTRA_CFLAGS += -I$(BCMDHD_ROOT)/include/ -I$(BCMDHD_ROOT)/
+ifeq ($(CONFIG_BCMDHD),m)
+EXTRA_LDFLAGS += --strip-debug
+endif
+
+obj-$(CONFIG_BCMDHD) += $(MODULE_NAME).o
+$(MODULE_NAME)-objs += $(DHDOFILES)
+ccflags-y := $(EXTRA_CFLAGS)
+
+#all: bcmdhd_sdio bcmdhd_usb
+all: bcmdhd_sdio
+
+EXTRA_CFLAGS += -I$(KERNEL_SRC)/$(M)/include -I$(KERNEL_SRC)/$(M)/
+modules_install:
+ @$(MAKE) INSTALL_MOD_STRIP=1 M=$(M) -C $(KERNEL_SRC) modules_install
+ mkdir -p ${OUT_DIR}/../private/modules
+ cd ${OUT_DIR}/$(M)/; find -name "*.ko" -exec cp {} ${OUT_DIR}/../private/modules/ \;
+
+bcmdhd_sdio:
+ $(warning "building BCMDHD_SDIO..........")
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) CONFIG_DHD_USE_STATIC_BUF=y CONFIG_BCMDHD_SDIO=y modules
+
+bcmdhd_usb:
+ $(warning "building BCMDHD_USB..........")
+ $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules CONFIG_BCMDHD_USB=y
+ mv dhd.ko dhd_usb.ko
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) clean
+ $(RM) Module.markers
+ $(RM) modules.order
diff --git a/bcmdhd.101.10.361.x/aiutils.c b/bcmdhd.101.10.361.x/aiutils.c
new file mode 100755
index 0000000..b8b9555
--- /dev/null
+++ b/bcmdhd.101.10.361.x/aiutils.c
@@ -0,0 +1,2604 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <pcie_core.h>
+
+#include "siutils_priv.h"
+#include <bcmdevs.h>
+
+#if defined(ETD)
+#include <etd.h>
+#endif
+
+#if !defined(BCMDONGLEHOST)
+#define PMU_DMP() (cores_info->coreid[sii->curidx] == PMU_CORE_ID)
+#define GCI_DMP() (cores_info->coreid[sii->curidx] == GCI_CORE_ID)
+#else
+#define PMU_DMP() (0)
+#define GCI_DMP() (0)
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(AXI_TIMEOUTS_NIC)
+static bool ai_get_apb_bridge(const si_t *sih, uint32 coreidx, uint32 *apb_id,
+ uint32 *apb_coreunit);
+#endif /* AXI_TIMEOUTS_NIC */
+
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+static void ai_reset_axi_to(const si_info_t *sii, aidmp_t *ai);
+#endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+
+#ifdef DONGLEBUILD
+static uint32 ai_get_sizeof_wrapper_offsets_to_dump(void);
+static uint32 ai_get_wrapper_base_addr(uint32 **offset);
+#endif /* DONGLEBUILD */
+
+/* AXI ID to CoreID + unit mappings */
+typedef struct axi_to_coreidx {
+ uint coreid;
+ uint coreunit;
+} axi_to_coreidx_t;
+
+static const axi_to_coreidx_t axi2coreidx_4369[] = {
+ {CC_CORE_ID, 0}, /* 00 Chipcommon */
+ {PCIE2_CORE_ID, 0}, /* 01 PCIe */
+ {D11_CORE_ID, 0}, /* 02 D11 Main */
+ {ARMCR4_CORE_ID, 0}, /* 03 ARM */
+ {BT_CORE_ID, 0}, /* 04 BT AHB */
+ {D11_CORE_ID, 1}, /* 05 D11 Aux */
+ {D11_CORE_ID, 0}, /* 06 D11 Main l1 */
+ {D11_CORE_ID, 1}, /* 07 D11 Aux l1 */
+ {D11_CORE_ID, 0}, /* 08 D11 Main l2 */
+ {D11_CORE_ID, 1}, /* 09 D11 Aux l2 */
+ {NODEV_CORE_ID, 0}, /* 10 M2M DMA */
+ {NODEV_CORE_ID, 0}, /* 11 unused */
+ {NODEV_CORE_ID, 0}, /* 12 unused */
+ {NODEV_CORE_ID, 0}, /* 13 unused */
+ {NODEV_CORE_ID, 0}, /* 14 unused */
+ {NODEV_CORE_ID, 0} /* 15 unused */
+};
+
+/* EROM parsing */
+
+static uint32
+get_erom_ent(const si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+ uint32 ent;
+ uint inv = 0, nom = 0;
+ uint32 size = 0;
+
+ while (TRUE) {
+ ent = R_REG(SI_INFO(sih)->osh, *eromptr);
+ (*eromptr)++;
+
+ if (mask == 0)
+ break;
+
+ if ((ent & ER_VALID) == 0) {
+ inv++;
+ continue;
+ }
+
+ if (ent == (ER_END | ER_VALID))
+ break;
+
+ if ((ent & mask) == match)
+ break;
+
+		/* bail out if we scan past the maximum EROM size without a match (invalid entries) */
+ size += sizeof(*eromptr);
+ if (size >= ER_SZ_MAX) {
+ SI_ERROR(("Failed to find end of EROM marker\n"));
+ break;
+ }
+
+ nom++;
+ }
+
+ SI_VMSG(("get_erom_ent: Returning ent 0x%08x\n", ent));
+ if (inv + nom) {
+ SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
+ }
+ return ent;
+}
+
+static uint32
+get_asd(const si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+ uint32 *sizel, uint32 *sizeh)
+{
+ uint32 asd, sz, szd;
+
+ BCM_REFERENCE(ad);
+
+ asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+ if (((asd & ER_TAG1) != ER_ADD) ||
+ (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+ ((asd & AD_ST_MASK) != st)) {
+ /* This is not what we want, "push" it back */
+ (*eromptr)--;
+ return 0;
+ }
+ *addrl = asd & AD_ADDR_MASK;
+ if (asd & AD_AG32)
+ *addrh = get_erom_ent(sih, eromptr, 0, 0);
+ else
+ *addrh = 0;
+ *sizeh = 0;
+ sz = asd & AD_SZ_MASK;
+ if (sz == AD_SZ_SZD) {
+ szd = get_erom_ent(sih, eromptr, 0, 0);
+ *sizel = szd & SD_SZ_MASK;
+ if (szd & SD_SG32)
+ *sizeh = get_erom_ent(sih, eromptr, 0, 0);
+ } else
+ *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+ SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+ sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+ return asd;
+}
+
+/* Parse the enumeration rom to identify all cores */
+void
+BCMATTACHFN(ai_scan)(si_t *sih, void *regs, uint devid)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ chipcregs_t *cc = (chipcregs_t *)regs;
+ uint32 erombase, *eromptr, *eromlim;
+ axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+
+ BCM_REFERENCE(devid);
+
+ erombase = R_REG(sii->osh, &cc->eromptr);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Set wrappers address */
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+ /* Now point the window at the erom */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+ eromptr = regs;
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ case SDIO_BUS:
+ eromptr = (uint32 *)(uintptr)erombase;
+ break;
+#endif /* BCMSDIO */
+
+ default:
+ SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
+ ASSERT(0);
+ return;
+ }
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+ sii->axi_num_wrappers = 0;
+
+ SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+ OSL_OBFUSCATE_BUF(regs), erombase,
+ OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
+ while (eromptr < eromlim) {
+ uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+ uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+ uint i, j, idx;
+ bool br;
+
+ br = FALSE;
+
+ /* Grok a component */
+ cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+ if (cia == (ER_END | ER_VALID)) {
+ SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+ return;
+ }
+
+ cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+ if ((cib & ER_TAG) != ER_CI) {
+ SI_ERROR(("CIA not followed by CIB\n"));
+ goto error;
+ }
+
+ cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+ mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+ nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+ nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+#ifdef BCMDBG_SI
+ SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+ "nsw = %d, nmp = %d & nsp = %d\n",
+ mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
+#else
+ BCM_REFERENCE(crev);
+#endif
+
+ /* Include Default slave wrapper for timeout monitoring */
+ if ((nsp == 0 && nsw == 0) ||
+#if !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC)
+ ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
+#else
+ ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
+ (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
+#endif /* !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC) */
+ FALSE) {
+ continue;
+ }
+
+ if ((nmw + nsw == 0)) {
+ /* A component which is not a core */
+ /* Should record some info */
+ if (cid == OOB_ROUTER_CORE_ID) {
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd != 0) {
+ if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
+ sii->oob_router1 = addrl;
+ } else {
+ sii->oob_router = addrl;
+ }
+ }
+ }
+ if ((cid != NS_CCB_CORE_ID) && (cid != PMU_CORE_ID) &&
+ (cid != GCI_CORE_ID) && (cid != SR_CORE_ID) &&
+ (cid != HUB_CORE_ID) && (cid != HND_OOBR_CORE_ID) &&
+ (cid != CCI400_CORE_ID) && (cid != SPMI_SLAVE_CORE_ID)) {
+ continue;
+ }
+ }
+
+ idx = sii->numcores;
+
+ cores_info->cia[idx] = cia;
+ cores_info->cib[idx] = cib;
+ cores_info->coreid[idx] = cid;
+
+		/* work around the fact that the variable buscoretype is used in _ai_setcoreidx()
+ * when checking PCIE_GEN2() for PCI_BUS case before it is setup later...,
+ * both use and setup happen in si_buscore_setup().
+ */
+ if (BUSTYPE(sih->bustype) == PCI_BUS &&
+ (cid == PCI_CORE_ID || cid == PCIE_CORE_ID || cid == PCIE2_CORE_ID)) {
+ sii->pub.buscoretype = (uint16)cid;
+ }
+
+ for (i = 0; i < nmp; i++) {
+ mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+ if ((mpd & ER_TAG) != ER_MP) {
+ SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+ goto error;
+ }
+ /* Record something? */
+ SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
+ (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+ (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+ }
+
+ /* First Slave Address Descriptor should be port 0:
+ * the main register space for the core
+ */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ do {
+ /* Try again to see if it is a bridge */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd != 0)
+ br = TRUE;
+ else {
+ break;
+ }
+ } while (1);
+ } else {
+ if (addrl == 0 || sizel == 0) {
+ SI_ERROR((" Invalid ASD %x for slave port \n", asd));
+ goto error;
+ }
+ cores_info->coresba[idx] = addrl;
+ cores_info->coresba_size[idx] = sizel;
+ }
+
+ /* Get any more ASDs in first port */
+ j = 1;
+ do {
+ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ /* Support ARM debug core ASD with address space > 4K */
+ if ((asd != 0) && (j == 1)) {
+ SI_VMSG(("Warning: sizel > 0x1000\n"));
+ cores_info->coresba2[idx] = addrl;
+ cores_info->coresba2_size[idx] = sizel;
+ }
+ j++;
+ } while (asd != 0);
+
+ /* Go through the ASDs for other slave ports */
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ /* To get the first base address of second slave port */
+ if ((asd != 0) && (i == 1) && (j == 0)) {
+ cores_info->csp2ba[idx] = addrl;
+ cores_info->csp2ba_size[idx] = sizel;
+ }
+ if (asd == 0)
+ break;
+ j++;
+ } while (1);
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n", i));
+ goto error;
+ }
+ }
+
+ /* Now get master wrappers */
+ for (i = 0; i < nmw; i++) {
+ asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for MW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if (i == 0) {
+ cores_info->wrapba[idx] = addrl;
+ } else if (i == 1) {
+ cores_info->wrapba2[idx] = addrl;
+ } else if (i == 2) {
+ cores_info->wrapba3[idx] = addrl;
+ }
+
+ if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
+ axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
+ axi_wrapper[sii->axi_num_wrappers].cid = cid;
+ axi_wrapper[sii->axi_num_wrappers].rev = crev;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
+ sii->axi_num_wrappers++;
+ SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
+ "rev:%x, addr:%x, size:%x\n",
+ sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
+ }
+ }
+
+ /* And finally slave wrappers */
+ for (i = 0; i < nsw; i++) {
+ uint fwp = (nsp <= 1) ? 0 : 1;
+ asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for SW %d cid %x eromp %p fwp %d \n",
+ i, cid, eromptr, fwp));
+ goto error;
+ }
+
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+
+ /* cache APB bridge wrapper address for set/clear timeout */
+ if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
+ ASSERT(sii->num_br < SI_MAXBR);
+ sii->br_wrapba[sii->num_br++] = addrl;
+ }
+
+ if ((mfg == MFGID_ARM) && (cid == ADB_BRIDGE_ID)) {
+ br = TRUE;
+ }
+
+ BCM_REFERENCE(br);
+
+ if ((nmw == 0) && (i == 0)) {
+ cores_info->wrapba[idx] = addrl;
+ } else if ((nmw == 0) && (i == 1)) {
+ cores_info->wrapba2[idx] = addrl;
+ } else if ((nmw == 0) && (i == 2)) {
+ cores_info->wrapba3[idx] = addrl;
+ }
+
+ /* Include all slave wrappers to the list to
+ * enable and monitor watchdog timeouts
+ */
+
+ if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
+ axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
+ axi_wrapper[sii->axi_num_wrappers].cid = cid;
+ axi_wrapper[sii->axi_num_wrappers].rev = crev;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
+
+ sii->axi_num_wrappers++;
+
+ SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x,"
+ "rev:%x, addr:%x, size:%x\n",
+ sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
+ }
+ }
+
+#ifndef AXI_TIMEOUTS_NIC
+ /* Don't record bridges and core with 0 slave ports */
+ if (br || (nsp == 0)) {
+ continue;
+ }
+#endif
+
+ /* Done with core */
+ sii->numcores++;
+ }
+
+ SI_ERROR(("Reached end of erom without finding END\n"));
+
+error:
+ sii->numcores = 0;
+ return;
+}
+
+#define AI_SETCOREIDX_MAPSIZE(coreid) \
+ (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static volatile void *
+BCMPOSTTRAPFN(_ai_setcoreidx)(si_t *sih, uint coreidx, uint use_wrapn)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint32 addr, wrap, wrap2, wrap3;
+ volatile void *regs;
+
+ if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+ return (NULL);
+
+ addr = cores_info->coresba[coreidx];
+ wrap = cores_info->wrapba[coreidx];
+ wrap2 = cores_info->wrapba2[coreidx];
+ wrap3 = cores_info->wrapba3[coreidx];
+
+#ifdef AXI_TIMEOUTS_NIC
+ /* No need to disable interrupts while entering/exiting APB bridge core */
+ if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
+ (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
+#endif /* AXI_TIMEOUTS_NIC */
+ {
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL) ||
+ !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+ }
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ /* map new one */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(addr,
+ AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ sii->curmap = regs = cores_info->regs[coreidx];
+ if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
+ cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
+ }
+ if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
+ cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
+ }
+ if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
+ cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
+ }
+
+ if (use_wrapn == 2) {
+ sii->curwrap = cores_info->wrappers3[coreidx];
+ } else if (use_wrapn == 1) {
+ sii->curwrap = cores_info->wrappers2[coreidx];
+ } else {
+ sii->curwrap = cores_info->wrappers[coreidx];
+ }
+ break;
+
+ case PCI_BUS:
+ regs = sii->curmap;
+
+ /* point bar0 2nd 4KB window to the primary wrapper */
+ if (use_wrapn == 2) {
+ wrap = wrap3;
+ } else if (use_wrapn == 1) {
+ wrap = wrap2;
+ }
+
+ /* Use BAR0 Window to support dual mac chips... */
+
+		/* TODO: the other mac unit can't be supported by the current BAR0 window.
+ * need to find other ways to access these cores.
+ */
+
+ switch (sii->slice) {
+ case 0: /* main/first slice */
+#ifdef AXI_TIMEOUTS_NIC
+ /* No need to set the BAR0 if core is APB Bridge.
+ * This is to reduce 2 PCI writes while checkng for errlog
+ */
+ if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
+#endif /* AXI_TIMEOUTS_NIC */
+ {
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+ }
+
+ if (PCIE_GEN2(sii))
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
+ else
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+
+ break;
+
+ case 1: /* aux/second slice */
+ /* PCIE GEN2 only for other slices */
+ if (!PCIE_GEN2(sii)) {
+ /* other slices not supported */
+ SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice));
+ ASSERT(0);
+ break;
+ }
+
+ /* 0x4000 - 0x4fff: enum space 0x5000 - 0x5fff: wrapper space */
+ regs = (volatile uint8 *)regs + PCI_SEC_BAR0_WIN_OFFSET;
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, addr);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN2, 4, wrap);
+ break;
+
+ case 2: /* scan/third slice */
+ /* PCIE GEN2 only for other slices */
+ if (!PCIE_GEN2(sii)) {
+ /* other slices not supported */
+ SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice));
+ ASSERT(0);
+ break;
+ }
+
+ /* 0x9000 - 0x9fff: enum space 0xa000 - 0xafff: wrapper space */
+ regs = (volatile uint8 *)regs + PCI_TER_BAR0_WIN_OFFSET;
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+ /* point bar0 window */
+ ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WIN, ~0, addr);
+ ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WRAPPER, ~0, wrap);
+ break;
+
+ default: /* other slices */
+ SI_ERROR(("BAR0 Window not supported for slice %d\n", sii->slice));
+ ASSERT(0);
+ break;
+ }
+
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ case SDIO_BUS:
+ sii->curmap = regs = (void *)((uintptr)addr);
+ if (use_wrapn)
+ sii->curwrap = (void *)((uintptr)wrap2);
+ else
+ sii->curwrap = (void *)((uintptr)wrap);
+ break;
+#endif /* BCMSDIO */
+
+ default:
+ ASSERT(0);
+ sii->curmap = regs = NULL;
+ break;
+ }
+
+ sii->curidx = coreidx;
+
+ return regs;
+}
+
+/* Switch the register mapping to 'coreidx' using the core's primary (first)
+ * wrapper; returns the mapped register base.
+ */
+volatile void *
+BCMPOSTTRAPFN(ai_setcoreidx)(si_t *sih, uint coreidx)
+{
+ return _ai_setcoreidx(sih, coreidx, 0);
+}
+
+/* Like ai_setcoreidx() but selects the core's second wrapper. */
+volatile void *
+BCMPOSTTRAPFN(ai_setcoreidx_2ndwrap)(si_t *sih, uint coreidx)
+{
+ return _ai_setcoreidx(sih, coreidx, 1);
+}
+
+/* Like ai_setcoreidx() but selects the core's third wrapper. */
+volatile void *
+BCMPOSTTRAPFN(ai_setcoreidx_3rdwrap)(si_t *sih, uint coreidx)
+{
+ return _ai_setcoreidx(sih, coreidx, 2);
+}
+
+/* Return, via *addr/*size, the base and size of the asidx'th additional slave
+ * address space of the current core, by re-walking the EROM from chipcommon.
+ * NOTE: on any failure only *size is zeroed; *addr is left untouched, so
+ * callers must test *size before trusting *addr.
+ */
+void
+ai_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ chipcregs_t *cc = NULL;
+ uint32 erombase, *eromptr, *eromlim;
+ uint i, j, cidx;
+ uint32 cia, cib, nmp, nsp;
+ uint32 asd, addrl, addrh, sizel, sizeh;
+
+ /* Locate chipcommon: the EROM pointer register lives there. */
+ for (i = 0; i < sii->numcores; i++) {
+ if (cores_info->coreid[i] == CC_CORE_ID) {
+ cc = (chipcregs_t *)cores_info->regs[i];
+ break;
+ }
+ }
+ if (cc == NULL)
+ goto error;
+
+ BCM_REFERENCE(erombase);
+ erombase = R_REG(sii->osh, &cc->eromptr);
+ eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+ cidx = sii->curidx;
+ cia = cores_info->cia[cidx];
+ cib = cores_info->cib[cidx];
+
+ /* Master/slave port counts for the current core, from its CIB word. */
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+ /* scan for cores */
+ while (eromptr < eromlim) {
+ if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
+ (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
+ break;
+ }
+ }
+
+ /* skip master ports */
+ for (i = 0; i < nmp; i++)
+ get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+
+ /* Skip ASDs in port 0 */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ /* Try again to see if it is a bridge */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+ &sizel, &sizeh);
+ }
+
+ /* Consume the remaining slave-port-0 descriptors (index 1..). */
+ j = 1;
+ do {
+ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ j++;
+ } while (asd != 0);
+
+ /* Go through the ASDs for other slave ports */
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0)
+ break;
+
+ /* asidx counts down across all ports until the match. */
+ if (!asidx--) {
+ *addr = addrl;
+ *size = sizel;
+ return;
+ }
+ j++;
+ } while (1);
+
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n", i));
+ break;
+ }
+ }
+
+error:
+ *size = 0;
+ return;
+}
+
+/* Return the number of address spaces in current core */
+int
+ai_numaddrspaces(const si_t *sih)
+{
+ /* TODO: not yet derived from the EROM; every supported core currently
+ * exposes two address spaces, so report a fixed count.
+ */
+ BCM_REFERENCE(sih);
+ return 2;
+}
+
+/* Return the address of the nth address space in the current core
+ * Arguments:
+ * sih : Pointer to struct si_t
+ * spidx : slave port index
+ * baidx : base address index
+ */
+uint32
+ai_addrspace(const si_t *sih, uint spidx, uint baidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ const uint coreidx = sii->curidx;
+
+ /* Only SP0/BA0, SP0/BA1 and SP1/BA0 are cached at scan time;
+ * anything else would need another EROM parse.
+ */
+ if ((spidx == CORE_SLAVE_PORT_0) && (baidx == CORE_BASE_ADDR_0))
+ return cores_info->coresba[coreidx];
+ if ((spidx == CORE_SLAVE_PORT_0) && (baidx == CORE_BASE_ADDR_1))
+ return cores_info->coresba2[coreidx];
+ if ((spidx == CORE_SLAVE_PORT_1) && (baidx == CORE_BASE_ADDR_0))
+ return cores_info->csp2ba[coreidx];
+
+ SI_ERROR(("ai_addrspace: Need to parse the erom again to find %d base addr"
+ " in %d slave port\n",
+ baidx, spidx));
+
+ return 0;
+}
+
+/* Return the size of the nth address space in the current core
+* Arguments:
+* sih : Pointer to struct si_t
+* spidx : slave port index
+* baidx : base address index
+*/
+uint32
+ai_addrspacesize(const si_t *sih, uint spidx, uint baidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ const uint coreidx = sii->curidx;
+
+ /* Mirrors ai_addrspace(): only the three cached descriptors have sizes. */
+ if ((spidx == CORE_SLAVE_PORT_0) && (baidx == CORE_BASE_ADDR_0))
+ return cores_info->coresba_size[coreidx];
+ if ((spidx == CORE_SLAVE_PORT_0) && (baidx == CORE_BASE_ADDR_1))
+ return cores_info->coresba2_size[coreidx];
+ if ((spidx == CORE_SLAVE_PORT_1) && (baidx == CORE_BASE_ADDR_0))
+ return cores_info->csp2ba_size[coreidx];
+
+ SI_ERROR(("ai_addrspacesize: Need to parse the erom again to find %d"
+ " base addr in %d slave port\n",
+ baidx, spidx));
+
+ return 0;
+}
+
+/* Return the OOB select (flag) of the current core, read from bits [4:0] of
+ * the wrapper's oobselouta30 register. When the current core is the PMU DMP,
+ * the value must instead be read through chipcommon's wrapper, so switch
+ * cores temporarily and use the alternate field.
+ */
+uint
+ai_flag(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+#if !defined(BCMDONGLEHOST)
+ /* 'cores_info' is referenced by the PMU_DMP() macro below. */
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+#endif
+ aidmp_t *ai;
+
+ if (PMU_DMP()) {
+ uint idx, flag;
+ idx = sii->curidx;
+ ai_setcoreidx(sih, SI_CC_IDX);
+ flag = ai_flag_alt(sih);
+ ai_setcoreidx(sih, idx);
+ return flag;
+ }
+
+ ai = sii->curwrap;
+ ASSERT(ai != NULL);
+
+ return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+uint
+ai_flag_alt(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai = sii->curwrap;
+ uint32 oobout = R_REG(sii->osh, &ai->oobselouta30);
+
+ /* Alternate flag: the second OOB selector field of oobselouta30. */
+ return (oobout >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK;
+}
+
+void
+ai_setint(const si_t *sih, int siflag)
+{
+ /* TODO: interrupt-mask programming is not implemented for AI
+ * interconnects; reference the parameters to keep builds quiet.
+ */
+ BCM_REFERENCE(sih);
+ BCM_REFERENCE(siflag);
+}
+
+uint
+BCMPOSTTRAPFN(ai_wrap_reg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 *reg = (uint32 *) ((uchar *)(sii->curwrap) + offset);
+
+ /* Read-modify-write only when the caller requested a change. */
+ if (mask || val) {
+ uint32 regval = R_REG(sii->osh, reg);
+ regval = (regval & ~mask) | val;
+ W_REG(sii->osh, reg, regval);
+ }
+
+ /* Always return the post-update contents of the wrapper register. */
+ return (R_REG(sii->osh, reg));
+}
+
+uint
+ai_corevendor(const si_t *sih)
+{
+ /* Manufacturer id is carried in the CIA word cached for the current core. */
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ const uint32 cia = cores_info->cia[sii->curidx];
+
+ return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+}
+
+uint
+BCMPOSTTRAPFN(ai_corerev)(const si_t *sih)
+{
+ /* Core revision is carried in the CIB word cached for the current core. */
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ const uint32 cib = cores_info->cib[sii->curidx];
+
+ return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+}
+
+uint
+ai_corerev_minor(const si_t *sih)
+{
+ /* The D11 minor revision is exposed through the core's sflags (iostatus). */
+ uint32 sflags = ai_core_sflags(sih, 0, 0);
+
+ return (sflags >> SISF_MINORREV_D11_SHIFT) & SISF_MINORREV_D11_MASK;
+}
+
+bool
+BCMPOSTTRAPFN(ai_iscoreup)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai = sii->curwrap;
+
+ /* Up means: clock enabled without force-gated-clock... (short-circuit
+ * keeps the resetctrl read from happening when the clock check fails,
+ * matching the original evaluation order).
+ */
+ if ((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) != SICF_CLOCK_EN)
+ return FALSE;
+
+ /* ...and the wrapper not holding the core in reset. */
+ return ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0);
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+/* Read-modify-write 'regoff' of core 'coreidx' and return the readback value.
+ * Fast paths (no core switch, no interrupt masking): SI bus, or PCI when the
+ * target is chipcommon/the bus core, which are permanently mapped in BAR0.
+ */
+uint
+BCMPOSTTRAPFN(ai_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ volatile uint32 *r = NULL;
+ uint w;
+ bcm_int_bitmask_t intr_val;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx, sii->numcores));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ /* Slow path: mask interrupts, switch to the target core, restore after. */
+ if (!fast) {
+ INTR_OFF(sii, &intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ /* readback */
+ w = R_REG(sii->osh, r);
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+ }
+
+ return (w);
+}
+
+/*
+ * Same as ai_corereg(): switch to 'coreidx', issue a single arbitrary 32bit
+ * register mask&set operation, and switch back to the original core — but
+ * skip the final readback. The returned value is the value written (0 when
+ * mask and val are both zero), which makes this variant safe for registers
+ * with read side effects.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+/* Like ai_corereg() but skips the final readback: returns the value that was
+ * written, or 0 when mask and val are both zero. Useful for write-sensitive
+ * or write-only registers.
+ */
+uint
+ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ volatile uint32 *r = NULL;
+ uint w = 0;
+ bcm_int_bitmask_t intr_val;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx, sii->numcores));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ /* Slow path: mask interrupts, switch to the target core, restore after. */
+ if (!fast) {
+ INTR_OFF(sii, &intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set (no readback afterwards, unlike ai_corereg) */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+ }
+
+ return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+/* Return a directly usable mapped address for register 'regoff' of core
+ * 'coreidx', valid for W_REG/R_REG without a core switch. Only the fast
+ * paths of ai_corereg() qualify (SI bus; or PCI with chipcommon/bus core).
+ * For anything else the caller must already have the core selected, which
+ * the !fast branch asserts. Returns NULL for an out-of-range core index.
+ */
+volatile uint32 *
+BCMPOSTTRAPFN(ai_corereg_addr)(si_t *sih, uint coreidx, uint regoff)
+{
+ volatile uint32 *r = NULL;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx, sii->numcores));
+ ASSERT(regoff < SI_CORE_SIZE);
+
+ if (coreidx >= SI_MAXCORES)
+ return NULL;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ /* No fast mapping: only valid if the core is already selected. */
+ ASSERT(sii->curidx == coreidx);
+ r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
+ }
+
+ return (r);
+}
+
+/* Put the current core into reset with 'bits' applied to its ioctrl, after
+ * draining pending backplane transactions. No-op if already in reset.
+ */
+void
+ai_core_disable(const si_t *sih, uint32 bits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ volatile uint32 dummy;
+ uint32 status;
+ aidmp_t *ai;
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ /* if core is already in reset, just return */
+ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
+ return;
+ }
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+ /* if pending backplane ops still, try waiting longer */
+ if (status != 0) {
+ /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
+ /* during driver load we may need more time */
+ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
+ /* if still pending ops, continue on and try disable anyway */
+ /* this is in big hammer path, so don't call wl_reinit in this case... */
+#ifdef BCMDBG
+ if (status != 0) {
+ SI_ERROR(("ai_core_disable: WARN: resetstatus=%0x on core disable\n",
+ status));
+ }
+#endif
+ }
+
+ /* Assert reset; the dummy readback flushes the posted write. */
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ dummy = R_REG(sii->osh, &ai->resetctrl);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ /* Apply the caller's ioctrl bits while the core is held in reset. */
+ W_REG(sii->osh, &ai->ioctrl, bits);
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(10);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ * Sequence: drain backplane ops, assert reset, enable clocks with FGC,
+ * de-assert reset (retrying up to 10 times), then drop FGC.
+ */
+static void
+BCMPOSTTRAPFN(_ai_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai;
+ volatile uint32 dummy;
+ uint loop_counter = 10;
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+#ifdef BCMDBG_ERR
+ if (dummy != 0) {
+ SI_ERROR(("_ai_core_reset: WARN1: resetstatus=0x%0x\n", dummy));
+ }
+#endif /* BCMDBG_ERR */
+
+ /* put core into reset state */
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ OSL_DELAY(10);
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+ /* Enable clock with force-gated-clock while still in reset. */
+ W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+#ifdef UCM_CORRUPTION_WAR
+ if (si_coreid(sih) == D11_CORE_ID) {
+ /* Reset FGC */
+ OSL_DELAY(1);
+ W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
+ }
+#endif /* UCM_CORRUPTION_WAR */
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+#ifdef BCMDBG_ERR
+ if (dummy != 0)
+ SI_ERROR(("_ai_core_reset: WARN2: resetstatus=0x%0x\n", dummy));
+#endif
+
+ /* De-assert reset; retry while resetctrl refuses to clear. */
+ while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+#ifdef BCMDBG_ERR
+ if (dummy != 0)
+ SI_ERROR(("_ai_core_reset: WARN3 resetstatus=0x%0x\n", dummy));
+#endif
+
+ /* take core out of reset */
+ W_REG(sii->osh, &ai->resetctrl, 0);
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+ }
+
+#ifdef BCMDBG_ERR
+ if (loop_counter == 0) {
+ SI_ERROR(("_ai_core_reset: Failed to take core 0x%x out of reset\n",
+ si_coreid(sih)));
+ }
+#endif
+
+#ifdef UCM_CORRUPTION_WAR
+ /* Pulse FGC after lifting Reset */
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+#else
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+#endif /* UCM_CORRUPTION_WAR */
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+#ifdef UCM_CORRUPTION_WAR
+ if (si_coreid(sih) == D11_CORE_ID) {
+ /* Reset FGC */
+ OSL_DELAY(1);
+ W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
+ }
+#endif /* UCM_CORRUPTION_WAR */
+ OSL_DELAY(1);
+}
+
+void
+BCMPOSTTRAPFN(ai_core_reset)(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ const uint coreidx = sii->curidx;
+
+ /* Reset the core through every wrapper it owns, highest-numbered wrapper
+ * first, restoring the primary wrapper selection after each pass.
+ */
+ if (cores_info->wrapba3[coreidx] != 0) {
+ ai_setcoreidx_3rdwrap(sih, coreidx);
+ _ai_core_reset(sih, bits, resetbits);
+ ai_setcoreidx(sih, coreidx);
+ }
+
+ if (cores_info->wrapba2[coreidx] != 0) {
+ ai_setcoreidx_2ndwrap(sih, coreidx);
+ _ai_core_reset(sih, bits, resetbits);
+ ai_setcoreidx(sih, coreidx);
+ }
+
+ /* Finally reset through the primary wrapper. */
+ _ai_core_reset(sih, bits, resetbits);
+}
+
+#ifdef BOOKER_NIC400_INF
+/* Reset only the currently selected wrapper; unlike ai_core_reset() this does
+ * not walk the core's 2nd/3rd wrappers.
+ */
+void
+BCMPOSTTRAPFN(ai_core_reset_ext)(const si_t *sih, uint32 bits, uint32 resetbits)
+{
+ _ai_core_reset(sih, bits, resetbits);
+}
+#endif /* BOOKER_NIC400_INF */
+
+void
+ai_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+#if !defined(BCMDONGLEHOST)
+ /* Referenced by the PMU_DMP() macro. */
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+#endif
+ aidmp_t *ai;
+
+ /* The PMU DMP has no usable ioctrl; refuse to touch it. */
+ if (PMU_DMP()) {
+ SI_ERROR(("ai_core_cflags_wo: Accessing PMU DMP register (ioctrl)\n"));
+ return;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ /* Write-only variant: read-modify-write ioctrl, nothing returned. */
+ if (mask || val) {
+ uint32 regval = (R_REG(sii->osh, &ai->ioctrl) & ~mask) | val;
+ W_REG(sii->osh, &ai->ioctrl, regval);
+ }
+}
+
+uint32
+BCMPOSTTRAPFN(ai_core_cflags)(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+#if !defined(BCMDONGLEHOST)
+ /* Referenced by the PMU_DMP() macro. */
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+#endif
+ aidmp_t *ai;
+
+ /* The PMU DMP has no usable ioctrl; refuse to touch it. */
+ if (PMU_DMP()) {
+ SI_ERROR(("ai_core_cflags: Accessing PMU DMP register (ioctrl)\n"));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ /* Read-modify-write ioctrl, then return its post-update contents. */
+ if (mask || val) {
+ uint32 regval = (R_REG(sii->osh, &ai->ioctrl) & ~mask) | val;
+ W_REG(sii->osh, &ai->ioctrl, regval);
+ }
+
+ return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+#if !defined(BCMDONGLEHOST)
+ /* Referenced by the PMU_DMP() macro. */
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+#endif
+ aidmp_t *ai;
+
+ /* The PMU DMP has no usable iostatus; refuse to touch it. */
+ if (PMU_DMP()) {
+ SI_ERROR(("ai_core_sflags: Accessing PMU DMP register (ioctrl)\n"));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+ /* Only the core-defined sflags bits may be touched. */
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ /* Read-modify-write iostatus, then return its post-update contents. */
+ if (mask || val) {
+ uint32 regval = (R_REG(sii->osh, &ai->iostatus) & ~mask) | val;
+ W_REG(sii->osh, &ai->iostatus, regval);
+ }
+
+ return R_REG(sii->osh, &ai->iostatus);
+}
+
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+/* print interesting aidmp registers */
+/* Dump the interesting DMP registers of every AXI wrapper into 'b'. On PCI
+ * the wrapper access window (BAR0 WIN2 / CORE2_WIN2) is redirected per
+ * wrapper and restored on exit.
+ */
+void
+ai_dumpregs(const si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ osl_t *osh;
+ aidmp_t *ai;
+ uint i;
+ uint32 prev_value = 0;
+ const axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+ uint32 cfg_reg = 0;
+ uint bar0_win_offset = 0;
+
+ osh = sii->osh;
+
+ /* Save and restore wrapper access window */
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (PCIE_GEN2(sii)) {
+ cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+ bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+ } else {
+ cfg_reg = PCI_BAR0_WIN2;
+ bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
+ }
+
+ prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+
+ /* ID32_INVALID here means config space is unreadable; bail out. */
+ if (prev_value == ID32_INVALID) {
+ SI_PRINT(("ai_dumpregs, PCI_BAR0_WIN2 - %x\n", prev_value));
+ return;
+ }
+ }
+
+ bcm_bprintf(b, "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
+ sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);
+
+ for (i = 0; i < sii->axi_num_wrappers; i++) {
+
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* Set BAR0 window to bridge wapper base address */
+ OSL_PCI_WRITE_CONFIG(osh,
+ cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+ ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
+ } else {
+ /* On SI bus the wrapper is directly addressable. */
+ ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+ }
+
+ bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
+ axi_wrapper[i].rev,
+ axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
+ axi_wrapper[i].wrapper_addr);
+
+ bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
+ "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
+ "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
+ "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
+ "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
+ "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
+ "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
+ R_REG(osh, &ai->ioctrlset),
+ R_REG(osh, &ai->ioctrlclear),
+ R_REG(osh, &ai->ioctrl),
+ R_REG(osh, &ai->iostatus),
+ R_REG(osh, &ai->ioctrlwidth),
+ R_REG(osh, &ai->iostatuswidth),
+ R_REG(osh, &ai->resetctrl),
+ R_REG(osh, &ai->resetstatus),
+ R_REG(osh, &ai->resetreadid),
+ R_REG(osh, &ai->resetwriteid),
+ R_REG(osh, &ai->errlogctrl),
+ R_REG(osh, &ai->errlogdone),
+ R_REG(osh, &ai->errlogstatus),
+ R_REG(osh, &ai->errlogaddrlo),
+ R_REG(osh, &ai->errlogaddrhi),
+ R_REG(osh, &ai->errlogid),
+ R_REG(osh, &ai->errloguser),
+ R_REG(osh, &ai->errlogflags),
+ R_REG(osh, &ai->intstatus),
+ R_REG(osh, &ai->config),
+ R_REG(osh, &ai->itcr));
+ }
+
+ /* Restore the initial wrapper space */
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (prev_value && cfg_reg) {
+ OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+ }
+ }
+}
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+
+#ifdef BCMDBG
+/* Print one wrapper's DMP state for core 'cid' at wrapper address 'addr'.
+ * Each register group is printed only if the wrapper's config register
+ * advertises it; OOB routing registers require 'verbose' as well.
+ */
+static void
+_ai_view(osl_t *osh, aidmp_t *ai, uint32 cid, uint32 addr, bool verbose)
+{
+ uint32 config;
+
+ config = R_REG(osh, &ai->config);
+ SI_PRINT(("\nCore ID: 0x%x, addr 0x%x, config 0x%x\n", cid, addr, config));
+
+ if (config & AICFG_RST)
+ SI_PRINT(("resetctrl 0x%x, resetstatus 0x%x, resetreadid 0x%x, resetwriteid 0x%x\n",
+ R_REG(osh, &ai->resetctrl), R_REG(osh, &ai->resetstatus),
+ R_REG(osh, &ai->resetreadid), R_REG(osh, &ai->resetwriteid)));
+
+ if (config & AICFG_IOC)
+ SI_PRINT(("ioctrl 0x%x, width %d\n", R_REG(osh, &ai->ioctrl),
+ R_REG(osh, &ai->ioctrlwidth)));
+
+ if (config & AICFG_IOS)
+ SI_PRINT(("iostatus 0x%x, width %d\n", R_REG(osh, &ai->iostatus),
+ R_REG(osh, &ai->iostatuswidth)));
+
+ if (config & AICFG_ERRL) {
+ SI_PRINT(("errlogctrl 0x%x, errlogdone 0x%x, errlogstatus 0x%x, intstatus 0x%x\n",
+ R_REG(osh, &ai->errlogctrl), R_REG(osh, &ai->errlogdone),
+ R_REG(osh, &ai->errlogstatus), R_REG(osh, &ai->intstatus)));
+ SI_PRINT(("errlogid 0x%x, errloguser 0x%x, errlogflags 0x%x, errlogaddr "
+ "0x%x/0x%x\n",
+ R_REG(osh, &ai->errlogid), R_REG(osh, &ai->errloguser),
+ R_REG(osh, &ai->errlogflags), R_REG(osh, &ai->errlogaddrhi),
+ R_REG(osh, &ai->errlogaddrlo)));
+ }
+
+ if (verbose && (config & AICFG_OOB)) {
+ SI_PRINT(("oobselina30 0x%x, oobselina74 0x%x\n",
+ R_REG(osh, &ai->oobselina30), R_REG(osh, &ai->oobselina74)));
+ SI_PRINT(("oobselinb30 0x%x, oobselinb74 0x%x\n",
+ R_REG(osh, &ai->oobselinb30), R_REG(osh, &ai->oobselinb74)));
+ SI_PRINT(("oobselinc30 0x%x, oobselinc74 0x%x\n",
+ R_REG(osh, &ai->oobselinc30), R_REG(osh, &ai->oobselinc74)));
+ SI_PRINT(("oobselind30 0x%x, oobselind74 0x%x\n",
+ R_REG(osh, &ai->oobselind30), R_REG(osh, &ai->oobselind74)));
+ SI_PRINT(("oobselouta30 0x%x, oobselouta74 0x%x\n",
+ R_REG(osh, &ai->oobselouta30), R_REG(osh, &ai->oobselouta74)));
+ SI_PRINT(("oobseloutb30 0x%x, oobseloutb74 0x%x\n",
+ R_REG(osh, &ai->oobseloutb30), R_REG(osh, &ai->oobseloutb74)));
+ SI_PRINT(("oobseloutc30 0x%x, oobseloutc74 0x%x\n",
+ R_REG(osh, &ai->oobseloutc30), R_REG(osh, &ai->oobseloutc74)));
+ SI_PRINT(("oobseloutd30 0x%x, oobseloutd74 0x%x\n",
+ R_REG(osh, &ai->oobseloutd30), R_REG(osh, &ai->oobseloutd74)));
+ SI_PRINT(("oobsynca 0x%x, oobseloutaen 0x%x\n",
+ R_REG(osh, &ai->oobsynca), R_REG(osh, &ai->oobseloutaen)));
+ SI_PRINT(("oobsyncb 0x%x, oobseloutben 0x%x\n",
+ R_REG(osh, &ai->oobsyncb), R_REG(osh, &ai->oobseloutben)));
+ SI_PRINT(("oobsyncc 0x%x, oobseloutcen 0x%x\n",
+ R_REG(osh, &ai->oobsyncc), R_REG(osh, &ai->oobseloutcen)));
+ SI_PRINT(("oobsyncd 0x%x, oobseloutden 0x%x\n",
+ R_REG(osh, &ai->oobsyncd), R_REG(osh, &ai->oobseloutden)));
+ SI_PRINT(("oobaextwidth 0x%x, oobainwidth 0x%x, oobaoutwidth 0x%x\n",
+ R_REG(osh, &ai->oobaextwidth), R_REG(osh, &ai->oobainwidth),
+ R_REG(osh, &ai->oobaoutwidth)));
+ SI_PRINT(("oobbextwidth 0x%x, oobbinwidth 0x%x, oobboutwidth 0x%x\n",
+ R_REG(osh, &ai->oobbextwidth), R_REG(osh, &ai->oobbinwidth),
+ R_REG(osh, &ai->oobboutwidth)));
+ SI_PRINT(("oobcextwidth 0x%x, oobcinwidth 0x%x, oobcoutwidth 0x%x\n",
+ R_REG(osh, &ai->oobcextwidth), R_REG(osh, &ai->oobcinwidth),
+ R_REG(osh, &ai->oobcoutwidth)));
+ SI_PRINT(("oobdextwidth 0x%x, oobdinwidth 0x%x, oobdoutwidth 0x%x\n",
+ R_REG(osh, &ai->oobdextwidth), R_REG(osh, &ai->oobdinwidth),
+ R_REG(osh, &ai->oobdoutwidth)));
+ }
+}
+
+void
+ai_view(const si_t *sih, bool verbose)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint32 cid, addr;
+
+ /* PMU DMP registers cannot be dumped through the wrapper. */
+ if (PMU_DMP()) {
+ SI_ERROR(("Cannot access pmu DMP\n"));
+ return;
+ }
+
+ cid = cores_info->coreid[sii->curidx];
+ addr = cores_info->wrapba[sii->curidx];
+ _ai_view(sii->osh, sii->curwrap, cid, addr, verbose);
+}
+
+void
+ai_viewall(si_t *sih, bool verbose)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint i;
+
+ /* Switch to each core in turn and dump its wrapper state. */
+ for (i = 0; i < sii->numcores; i++) {
+ si_setcoreidx(sih, i);
+
+ /* PMU DMP registers cannot be dumped through the wrapper. */
+ if (PMU_DMP()) {
+ SI_ERROR(("Skipping pmu DMP\n"));
+ continue;
+ }
+
+ _ai_view(sii->osh, sii->curwrap, cores_info->coreid[sii->curidx],
+ cores_info->wrapba[sii->curidx], verbose);
+ }
+}
+#endif /* BCMDBG */
+
+/* Enable/disable the AXI backplane timeout (with exponent 'timeout_exp') on
+ * every slave wrapper, or only on wrappers whose core id matches 'cid' when
+ * cid != 0. Compiled out unless AXI_TIMEOUTS or AXI_TIMEOUTS_NIC is set; in
+ * NIC mode the PCI BAR0 window is redirected per wrapper and restored.
+ */
+void
+ai_update_backplane_timeouts(const si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
+{
+#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai;
+ uint32 i;
+ axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+ uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
+ ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);
+
+#ifdef AXI_TIMEOUTS_NIC
+ uint32 prev_value = 0;
+ osl_t *osh = sii->osh;
+ uint32 cfg_reg = 0;
+ uint32 offset = 0;
+#endif /* AXI_TIMEOUTS_NIC */
+
+ /* Nothing to do without wrappers (or, in NIC mode, without PCIE). */
+ if ((sii->axi_num_wrappers == 0) ||
+#ifdef AXI_TIMEOUTS_NIC
+ (!PCIE(sii)) ||
+#endif /* AXI_TIMEOUTS_NIC */
+ FALSE) {
+ SI_VMSG((" iai_update_backplane_timeouts, axi_num_wrappers:%d, Is_PCIE:%d,"
+ " BUS_TYPE:%d, ID:%x\n",
+ sii->axi_num_wrappers, PCIE(sii),
+ BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+ return;
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ /* Save and restore the wrapper access window */
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (PCIE_GEN1(sii)) {
+ cfg_reg = PCI_BAR0_WIN2;
+ offset = PCI_BAR0_WIN2_OFFSET;
+ } else if (PCIE_GEN2(sii)) {
+ cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+ offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+ }
+ else {
+ ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
+ }
+
+ prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+ if (prev_value == ID32_INVALID) {
+ SI_PRINT(("ai_update_backplane_timeouts, PCI_BAR0_WIN2 - %x\n",
+ prev_value));
+ return;
+ }
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+ for (i = 0; i < sii->axi_num_wrappers; ++i) {
+ /* WAR for wrong EROM entries w.r.t slave and master wrapper
+ * for ADB bridge core...so checking actual wrapper config to determine type
+ * http://jira.broadcom.com/browse/HW4388-905
+ */
+ if ((cid == 0 || cid == ADB_BRIDGE_ID) &&
+ (axi_wrapper[i].cid == ADB_BRIDGE_ID)) {
+ /* WAR is applicable only to 89B0 and 89C0 */
+ if (CCREV(sih->ccrev) == 70) {
+ ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
+ if (R_REG(sii->osh, &ai->config) & WRAPPER_TIMEOUT_CONFIG) {
+ axi_wrapper[i].wrapper_type = AI_SLAVE_WRAPPER;
+ } else {
+ axi_wrapper[i].wrapper_type = AI_MASTER_WRAPPER;
+ }
+ }
+ }
+ /* Timeouts apply only to slave wrappers; also skip the 4388/4389
+ * WL bridge slaves, which must not have timeouts enabled.
+ */
+ if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER || ((BCM4389_CHIP(sih->chip) ||
+ BCM4388_CHIP(sih->chip)) &&
+ (axi_wrapper[i].wrapper_addr == WL_BRIDGE1_S ||
+ axi_wrapper[i].wrapper_addr == WL_BRIDGE2_S))) {
+ SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
+ axi_wrapper[i].mfg,
+ axi_wrapper[i].cid,
+ axi_wrapper[i].wrapper_addr));
+ continue;
+ }
+
+ /* Update only given core if requested */
+ if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
+ continue;
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* Set BAR0_CORE2_WIN2 to bridge wapper base address */
+ OSL_PCI_WRITE_CONFIG(osh,
+ cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+ /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
+ ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
+ }
+ else
+#endif /* AXI_TIMEOUTS_NIC */
+ {
+ ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+ }
+
+ W_REG(sii->osh, &ai->errlogctrl, errlogctrl);
+
+ SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
+ axi_wrapper[i].mfg,
+ axi_wrapper[i].cid,
+ axi_wrapper[i].wrapper_addr,
+ R_REG(sii->osh, &ai->errlogctrl)));
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ /* Restore the initial wrapper space */
+ if (prev_value) {
+ OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+}
+
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+
+/* Count of AXI error-log events deliberately ignored as benign (e.g. known
+ * BT slave errors); kept for diagnostics.
+ */
+static uint32 si_ignore_errlog_cnt = 0;
+
+static bool
+BCMPOSTTRAPFN(ai_ignore_errlog)(const si_info_t *sii, const aidmp_t *ai,
+ uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
+{
+ uint32 ignore_errsts = AIELS_SLAVE_ERR;
+ uint32 ignore_errsts_2 = 0;
+ uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
+ uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
+ uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
+ bool address_check = TRUE;
+ uint32 axi_id = 0;
+ uint32 axi_id2 = 0;
+ bool extd_axi_id_mask = FALSE;
+ uint32 axi_id_mask;
+
+ SI_PRINT(("err check: core %p, error %d, axi id 0x%04x, addr(0x%08x:%08x)\n",
+ ai, errsts, err_axi_id, hi_addr, lo_addr));
+
+ /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
+ switch (CHIPID(sii->pub.chip)) {
+#if defined(BT_WLAN_REG_ON_WAR)
+ /*
+ * 4389B0/C0 - WL and BT turn on WAR, ignore AXI error originating from
+ * AHB-AXI bridge i.e, any slave error or timeout from BT access
+ */
+ case BCM4389_CHIP_GRPID:
+ axi_id = BCM4389_BT_AXI_ID;
+ ignore_errsts = AIELS_SLAVE_ERR;
+ axi_id2 = BCM4389_BT_AXI_ID;
+ ignore_errsts_2 = AIELS_TIMEOUT;
+ address_check = FALSE;
+ extd_axi_id_mask = TRUE;
+ break;
+#endif /* BT_WLAN_REG_ON_WAR */
+#ifdef BTOVERPCIE
+ case BCM4388_CHIP_GRPID:
+ axi_id = BCM4388_BT_AXI_ID;
+ /* For BT over PCIE, ignore any slave error from BT. */
+ /* No need to check any address range */
+ address_check = FALSE;
+ ignore_errsts_2 = AIELS_DECODE;
+ break;
+ case BCM4369_CHIP_GRPID:
+ axi_id = BCM4369_BT_AXI_ID;
+ /* For BT over PCIE, ignore any slave error from BT. */
+ /* No need to check any address range */
+ address_check = FALSE;
+ ignore_errsts_2 = AIELS_DECODE;
+ break;
+#endif /* BTOVERPCIE */
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+#ifdef BTOVERPCIE
+ axi_id = BCM4378_BT_AXI_ID;
+ /* For BT over PCIE, ignore any slave error from BT. */
+ /* No need to check any address range */
+ address_check = FALSE;
+#endif /* BTOVERPCIE */
+ axi_id2 = BCM4378_ARM_PREFETCH_AXI_ID;
+ extd_axi_id_mask = TRUE;
+ ignore_errsts_2 = AIELS_DECODE;
+ break;
+#ifdef USE_HOSTMEM
+ case BCM43602_CHIP_ID:
+ axi_id = BCM43602_BT_AXI_ID;
+ address_check = FALSE;
+ break;
+#endif /* USE_HOSTMEM */
+ default:
+ return FALSE;
+ }
+
+ axi_id_mask = extd_axi_id_mask ? AI_ERRLOGID_AXI_ID_MASK_EXTD : AI_ERRLOGID_AXI_ID_MASK;
+
+ /* AXI ID check */
+ err_axi_id &= axi_id_mask;
+ errsts &= AIELS_ERROR_MASK;
+
+ /* check the ignore error cases. 2 checks */
+ if (!(((err_axi_id == axi_id) && (errsts == ignore_errsts)) ||
+ ((err_axi_id == axi_id2) && (errsts == ignore_errsts_2)))) {
+ /* not the error ignore cases */
+ return FALSE;
+
+ }
+
+ /* check the specific address checks now, if specified */
+ if (address_check) {
+ /* address range check */
+ if ((hi_addr != ignore_hi) ||
+ (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size))) {
+ return FALSE;
+ }
+ }
+
+ SI_PRINT(("err check: ignored\n"));
+ return TRUE;
+}
+#endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+
+#ifdef AXI_TIMEOUTS_NIC
+
+/* Function to return the APB bridge details corresponding to the core.
+ * Scans the core table for the APB bridge whose address window contains the
+ * target core's register space. Returns TRUE and fills *apb_id/*apb_coreunit
+ * on success; FALSE when coreidx is out of range or no bridge encloses it.
+ * The last successful lookup is cached (typical caller repeatedly asks for
+ * the d11 core).
+ */
+static bool
+ai_get_apb_bridge(const si_t * sih, uint32 coreidx, uint32 *apb_id, uint32 * apb_coreunit)
+{
+	uint i;
+	uint32 core_base, core_end;
+	const si_info_t *sii = SI_INFO(sih);
+	static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
+	/* FIX: without a validity flag, a first-ever query for coreidx 0 would
+	 * falsely match the zero-initialized cache and return garbage values.
+	 */
+	static bool cache_valid = FALSE;
+	uint32 tmp_coreunit = 0;
+	const si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+		return FALSE;
+
+	/* Most of the time apb bridge query will be for d11 core.
+	 * Maintain the last cache and return if found rather than iterating the table
+	 */
+	if (cache_valid && (coreidx_cached == coreidx)) {
+		*apb_id = apb_id_cached;
+		*apb_coreunit = apb_coreunit_cached;
+		return TRUE;
+	}
+
+	core_base = cores_info->coresba[coreidx];
+	core_end = core_base + cores_info->coresba_size[coreidx];
+
+	for (i = 0; i < sii->numcores; i++) {
+		if (cores_info->coreid[i] == APB_BRIDGE_ID) {
+			uint32 apb_base;
+			uint32 apb_end;
+
+			apb_base = cores_info->coresba[i];
+			apb_end = apb_base + cores_info->coresba_size[i];
+
+			if ((core_base >= apb_base) &&
+				(core_end <= apb_end)) {
+				/* Current core is attached to this APB bridge */
+				*apb_id = apb_id_cached = APB_BRIDGE_ID;
+				*apb_coreunit = apb_coreunit_cached = tmp_coreunit;
+				coreidx_cached = coreidx;
+				cache_valid = TRUE;
+				return TRUE;
+			}
+			/* Increment the coreunit */
+			tmp_coreunit++;
+		}
+	}
+
+	return FALSE;
+}
+
+/*
+ * Fast-path backplane timeout check.
+ * If 'addr' falls inside the current core's register window, only the APB
+ * bridge serving that core is polled for errors; otherwise (or when no
+ * bridge is found) fall back to polling every slave wrapper.
+ * Returns the AXI_WRAP_STS_* bitmap from the underlying check.
+ */
+uint32
+ai_clear_backplane_to_fast(si_t *sih, void *addr)
+{
+	const si_info_t *sii = SI_INFO(sih);
+	volatile const void *curmap = sii->curmap;
+	bool core_reg = FALSE;
+
+	/* Use fast path only for core register access */
+	if (((uintptr)addr >= (uintptr)curmap) &&
+		((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
+		/* address being accessed is within current core reg map */
+		core_reg = TRUE;
+	}
+
+	if (core_reg) {
+		uint32 apb_id, apb_coreunit;
+
+		if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
+			&apb_id, &apb_coreunit) == TRUE) {
+			/* Found the APB bridge corresponding to current core,
+			 * Check for bus errors in APB wrapper
+			 */
+			return ai_clear_backplane_to_per_core(sih,
+				apb_id, apb_coreunit, NULL);
+		}
+	}
+
+	/* Default is to poll for errors on all slave wrappers */
+	return si_clear_backplane_to(sih);
+}
+#endif /* AXI_TIMEOUTS_NIC */
+
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+/* Latched TRUE when an APB bridge fails to come out of reset; disables
+ * further backplane error logging (see ai_check_enable_backplane_log).
+ */
+static bool g_disable_backplane_logs = FALSE;
+
+/* Snapshot of the most recent AXI error, filled by
+ * ai_clear_backplane_to_per_core and read back by
+ * ai_wrapper_get_last_error / ai_wrapper_dump_last_timeout.
+ */
+static uint32 last_axi_error = AXI_WRAP_STS_NONE;
+static uint32 last_axi_error_log_status = 0;
+static uint32 last_axi_error_core = 0;
+static uint32 last_axi_error_wrap = 0;
+static uint32 last_axi_errlog_lo = 0;
+static uint32 last_axi_errlog_hi = 0;
+static uint32 last_axi_errlog_id = 0;
+
+/*
+ * API to clear the back plane timeout per core.
+ * Caller may pass optional wrapper address. If present this will be used as
+ * the wrapper base address. If wrapper base address is provided then caller
+ * must provide the coreid also.
+ * If both coreid and wrapper is zero, then err status of current bridge
+ * will be verified.
+ * Returns a bitmap of AXI_WRAP_STS_* codes (AXI_WRAP_STS_NONE when clean),
+ * or ID32_INVALID if the original core could not be restored.
+ */
+uint32
+BCMPOSTTRAPFN(ai_clear_backplane_to_per_core)(si_t *sih, uint coreid, uint coreunit, void *wrap)
+{
+	int ret = AXI_WRAP_STS_NONE;
+	aidmp_t *ai = NULL;
+	uint32 errlog_status = 0;
+	const si_info_t *sii = SI_INFO(sih);
+	uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
+	uint32 current_coreidx = si_coreidx(sih);
+	uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
+
+#if defined(AXI_TIMEOUTS_NIC)
+	si_axi_error_t * axi_error = sih->err_info ?
+		&sih->err_info->axi_error[sih->err_info->count] : NULL;
+#endif /* AXI_TIMEOUTS_NIC */
+	bool restore_core = FALSE;
+
+	/* nothing to do without wrappers (and, for NIC builds, a PCIe buscore) */
+	if ((sii->axi_num_wrappers == 0) ||
+#ifdef AXI_TIMEOUTS_NIC
+		(!PCIE(sii)) ||
+#endif /* AXI_TIMEOUTS_NIC */
+		FALSE) {
+		SI_VMSG(("ai_clear_backplane_to_per_core, axi_num_wrappers:%d, Is_PCIE:%d,"
+			" BUS_TYPE:%d, ID:%x\n",
+			sii->axi_num_wrappers, PCIE(sii),
+			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+		return AXI_WRAP_STS_NONE;
+	}
+
+	/* resolve the wrapper to inspect: explicit pointer, another core's
+	 * wrapper (requires a core switch), or the current core's wrapper
+	 */
+	if (wrap != NULL) {
+		ai = (aidmp_t *)wrap;
+	} else if (coreid && (target_coreidx != current_coreidx)) {
+
+		if (ai_setcoreidx(sih, target_coreidx) == NULL) {
+			/* Unable to set the core */
+			SI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
+				coreid, coreunit, target_coreidx));
+			errlog_lo = target_coreidx;
+			ret = AXI_WRAP_STS_SET_CORE_FAIL;
+			goto end;
+		}
+
+		restore_core = TRUE;
+		ai = (aidmp_t *)si_wrapperregs(sih);
+	} else {
+		/* Read error status of current wrapper */
+		ai = (aidmp_t *)si_wrapperregs(sih);
+
+		/* Update CoreID to current Code ID */
+		coreid = si_coreid(sih);
+	}
+
+	/* read error log status */
+	errlog_status = R_REG(sii->osh, &ai->errlogstatus);
+
+	if (errlog_status == ID32_INVALID) {
+		/* Do not try to peek further */
+		SI_PRINT(("ai_clear_backplane_to_per_core, errlogstatus:%x - Slave Wrapper:%x\n",
+			errlog_status, coreid));
+		ret = AXI_WRAP_STS_WRAP_RD_ERR;
+		errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+		goto end;
+	}
+
+	if ((errlog_status & AIELS_ERROR_MASK) != 0) {
+		uint32 tmp;
+		uint32 count = 0;
+		/* set ErrDone to clear the condition */
+		W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+		/* SPINWAIT on errlogstatus timeout status bits */
+		while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_ERROR_MASK) {
+
+			if (tmp == ID32_INVALID) {
+				SI_PRINT(("ai_clear_backplane_to_per_core: prev errlogstatus:%x,"
+					" errlogstatus:%x\n",
+					errlog_status, tmp));
+				ret = AXI_WRAP_STS_WRAP_RD_ERR;
+				errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+				goto end;
+			}
+			/*
+			 * Clear again, to avoid getting stuck in the loop, if a new error
+			 * is logged after we cleared the first timeout
+			 */
+			W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+			count++;
+			OSL_DELAY(10);
+			if ((10 * count) > AI_REG_READ_TIMEOUT) {
+				errlog_status = tmp;
+				break;
+			}
+		}
+
+		errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
+		errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
+		errlog_id = R_REG(sii->osh, &ai->errlogid);
+		errlog_flags = R_REG(sii->osh, &ai->errlogflags);
+
+		/* we are already in the error path, so OK to check for the slave error */
+		if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
+			errlog_status)) {
+			si_ignore_errlog_cnt++;
+			goto end;
+		}
+
+		/* only reset APB Bridge on timeout (not slave error, or dec error) */
+		switch (errlog_status & AIELS_ERROR_MASK) {
+			case AIELS_SLAVE_ERR:
+				SI_PRINT(("AXI slave error\n"));
+				ret |= AXI_WRAP_STS_SLAVE_ERR;
+				break;
+
+			case AIELS_TIMEOUT:
+				ai_reset_axi_to(sii, ai);
+				ret |= AXI_WRAP_STS_TIMEOUT;
+				break;
+
+			case AIELS_DECODE:
+				SI_PRINT(("AXI decode error\n"));
+#ifdef USE_HOSTMEM
+				/* Ignore known cases of CR4 prefetch abort bugs */
+				if ((errlog_id & (BCM_AXI_ID_MASK | BCM_AXI_ACCESS_TYPE_MASK)) !=
+					(BCM43xx_AXI_ACCESS_TYPE_PREFETCH | BCM43xx_CR4_AXI_ID))
+#endif
+				{
+					ret |= AXI_WRAP_STS_DECODE_ERR;
+				}
+				break;
+			default:
+				ASSERT(0);	/* should be impossible */
+		}
+
+		if (errlog_status & AIELS_MULTIPLE_ERRORS) {
+			SI_PRINT(("Multiple AXI Errors\n"));
+			/* Set multiple errors bit only if actual error is not ignored */
+			if (ret) {
+				ret |= AXI_WRAP_STS_MULTIPLE_ERRORS;
+			}
+		}
+
+		SI_PRINT(("\tCoreID: %x\n", coreid));
+		SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
+			", status 0x%08x\n",
+			errlog_lo, errlog_hi, errlog_id, errlog_flags,
+			errlog_status));
+	}
+
+end:
+	/* record the last error snapshot for post-mortem queries */
+	if (ret != AXI_WRAP_STS_NONE) {
+		last_axi_error = ret;
+		last_axi_error_log_status = errlog_status;
+		last_axi_error_core = coreid;
+		/* NOTE(review): (uint32) cast truncates the wrapper pointer on
+		 * 64-bit hosts; presumably only used in 32-bit dongle builds -
+		 * confirm before relying on it in NIC mode.
+		 */
+		last_axi_error_wrap = (uint32)ai;
+		last_axi_errlog_lo = errlog_lo;
+		last_axi_errlog_hi = errlog_hi;
+		last_axi_errlog_id = errlog_id;
+	}
+
+#if defined(AXI_TIMEOUTS_NIC)
+	if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
+		axi_error->error = ret;
+		axi_error->coreid = coreid;
+		axi_error->errlog_lo = errlog_lo;
+		axi_error->errlog_hi = errlog_hi;
+		axi_error->errlog_id = errlog_id;
+		axi_error->errlog_flags = errlog_flags;
+		axi_error->errlog_status = errlog_status;
+		sih->err_info->count++;
+
+		/* saturate rather than overrun the fixed-size error log */
+		if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+			sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+			SI_PRINT(("AXI Error log overflow\n"));
+		}
+	}
+#endif /* AXI_TIMEOUTS_NIC */
+
+	if (restore_core) {
+		if (ai_setcoreidx(sih, current_coreidx) == NULL) {
+			/* Unable to set the core */
+			return ID32_INVALID;
+		}
+	}
+
+	return ret;
+}
+
+/* reset AXI timeout: pulse AIRC_RESET on the APB bridge to unblock the bus.
+ * Each write is followed by a read-back to post it on the backplane.
+ * If the bridge stays in reset, backplane logging is permanently disabled
+ * via g_disable_backplane_logs.
+ */
+static void
+BCMPOSTTRAPFN(ai_reset_axi_to)(const si_info_t *sii, aidmp_t *ai)
+{
+	/* reset APB Bridge */
+	OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	/* sync write */
+	(void)R_REG(sii->osh, &ai->resetctrl);
+	/* clear Reset bit */
+	AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
+	/* sync write */
+	(void)R_REG(sii->osh, &ai->resetctrl);
+	SI_PRINT(("AXI timeout\n"));
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
+		SI_PRINT(("reset failed on wrapper %p\n", ai));
+		g_disable_backplane_logs = TRUE;
+	}
+}
+
+/* Copy out the snapshot of the most recent AXI error recorded by
+ * ai_clear_backplane_to_per_core. All out-pointers must be non-NULL;
+ * values are zero/AXI_WRAP_STS_NONE if no error has occurred yet.
+ * 'sih' is unused; kept for API symmetry with the other wrapper calls.
+ */
+void
+BCMPOSTTRAPFN(ai_wrapper_get_last_error)(const si_t *sih, uint32 *error_status, uint32 *core,
+	uint32 *lo, uint32 *hi, uint32 *id)
+{
+	*error_status = last_axi_error_log_status;
+	*core = last_axi_error_core;
+	*lo = last_axi_errlog_lo;
+	*hi = last_axi_errlog_hi;
+	*id = last_axi_errlog_id;
+}
+
+/* Function to check whether AXI timeout has been registered on a core.
+ * Returns the last logged low error address when it passes the GOODREGS
+ * sanity check, otherwise 0.
+ */
+uint32
+ai_get_axi_timeout_reg(void)
+{
+	if (GOODREGS(last_axi_errlog_lo)) {
+		return last_axi_errlog_lo;
+	}
+	return 0;
+}
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+/* Map an AXI master id to a core index via a per-chip lookup table.
+ * Returns BADIDX when the chip has no mapping table.
+ * NOTE(review): 'axiid' indexes axi2coreidx_4369 without a bounds check;
+ * callers must pass ids within the table's range - confirm table size.
+ */
+uint32
+BCMPOSTTRAPFN(ai_findcoreidx_by_axiid)(const si_t *sih, uint32 axiid)
+{
+	uint coreid = 0;
+	uint coreunit = 0;
+	const axi_to_coreidx_t *axi2coreidx = NULL;
+	switch (CHIPID(sih->chip)) {
+		case BCM4369_CHIP_GRPID:
+			axi2coreidx = axi2coreidx_4369;
+			break;
+		default:
+			SI_PRINT(("Chipid mapping not found\n"));
+			break;
+	}
+
+	if (!axi2coreidx)
+		return (BADIDX);
+
+	coreid = axi2coreidx[axiid].coreid;
+	coreunit = axi2coreidx[axiid].coreunit;
+
+	return si_findcoreidx(sih, coreid, coreunit);
+
+}
+
+/*
+ * This API polls all slave wrappers for errors and returns bit map of
+ * all reported errors.
+ * return - bit map of
+ * AXI_WRAP_STS_NONE
+ * AXI_WRAP_STS_TIMEOUT
+ * AXI_WRAP_STS_SLAVE_ERR
+ * AXI_WRAP_STS_DECODE_ERR
+ * AXI_WRAP_STS_PCI_RD_ERR
+ * AXI_WRAP_STS_WRAP_RD_ERR
+ * AXI_WRAP_STS_SET_CORE_FAIL
+ * On timeout detection, correspondign bridge will be reset to
+ * unblock the bus.
+ * Error reported in each wrapper can be retrieved using the API
+ * si_get_axi_errlog_info()
+ */
+uint32
+BCMPOSTTRAPFN(ai_clear_backplane_to)(si_t *sih)
+{
+	uint32 ret = 0;
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+	const si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 i;
+	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+
+#ifdef AXI_TIMEOUTS_NIC
+	uint32 prev_value = 0;
+	osl_t *osh = sii->osh;
+	uint32 cfg_reg = 0;
+	uint32 offset = 0;
+
+	if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
+#else
+	if (sii->axi_num_wrappers == 0)
+#endif
+	{
+		SI_VMSG(("ai_clear_backplane_to, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d,"
+			" ID:%x\n",
+			sii->axi_num_wrappers, PCIE(sii),
+			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+		return AXI_WRAP_STS_NONE;
+	}
+
+#ifdef AXI_TIMEOUTS_NIC
+	/* Save and restore wrapper access window; the BAR0 window register
+	 * differs between PCIe Gen1 and Gen2 buscores.
+	 */
+	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		if (PCIE_GEN1(sii)) {
+			cfg_reg = PCI_BAR0_WIN2;
+			offset = PCI_BAR0_WIN2_OFFSET;
+		} else if (PCIE_GEN2(sii)) {
+			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+		}
+		else {
+			ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
+		}
+
+		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+
+		/* an all-ones config read means the device fell off the bus */
+		if (prev_value == ID32_INVALID) {
+			si_axi_error_t * axi_error =
+				sih->err_info ?
+				&sih->err_info->axi_error[sih->err_info->count] :
+				NULL;
+
+			SI_PRINT(("ai_clear_backplane_to, PCI_BAR0_WIN2 - %x\n", prev_value));
+			if (axi_error) {
+				axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
+				axi_error->errlog_lo = cfg_reg;
+				sih->err_info->count++;
+
+				if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+					sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+					SI_PRINT(("AXI Error log overflow\n"));
+				}
+			}
+
+			return ret;
+		}
+	}
+#endif /* AXI_TIMEOUTS_NIC */
+
+	/* poll every slave wrapper and accumulate the error bitmap */
+	for (i = 0; i < sii->axi_num_wrappers; ++i) {
+		uint32 tmp;
+
+		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
+			continue;
+		}
+
+#ifdef AXI_TIMEOUTS_NIC
+		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+			/* Set BAR0_CORE2_WIN2 to bridge wapper base address */
+			OSL_PCI_WRITE_CONFIG(osh,
+				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+			/* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
+			ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
+		}
+		else
+#endif /* AXI_TIMEOUTS_NIC */
+		{
+			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+		}
+
+		tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
+			DISCARD_QUAL(ai, void));
+
+		ret |= tmp;
+	}
+
+#ifdef AXI_TIMEOUTS_NIC
+	/* Restore the initial wrapper space */
+	if (prev_value) {
+		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+	}
+#endif /* AXI_TIMEOUTS_NIC */
+
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+	return ret;
+}
+
+/* Number of slave ports implemented by the core at 'coreidx', decoded from
+ * the NSP field of that core's CIB (component info B) descriptor.
+ */
+uint
+ai_num_slaveports(const si_t *sih, uint coreidx)
+{
+	const si_info_t *sii = SI_INFO(sih);
+	const si_cores_info_t *cores = (const si_cores_info_t *)sii->cores_info;
+	uint32 cib_entry = cores->cib[coreidx];
+
+	return ((cib_entry & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
+}
+
+#ifdef UART_TRAP_DBG
+/* Debug helper: print the error-log registers of APB bridge 0
+ * (first entry of the bridge wrapper base-address table).
+ */
+void
+ai_dump_APB_Bridge_registers(const si_t *sih)
+{
+	aidmp_t *ai;
+	const si_info_t *sii = SI_INFO(sih);
+
+	ai = (aidmp_t *)sii->br_wrapba[0];
+	printf("APB Bridge 0\n");
+	printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
+		R_REG(sii->osh, &ai->errlogaddrlo),
+		R_REG(sii->osh, &ai->errlogaddrhi),
+		R_REG(sii->osh, &ai->errlogid),
+		R_REG(sii->osh, &ai->errlogflags));
+	printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
+}
+#endif /* UART_TRAP_DBG */
+
+/*
+ * Force the current core's clocks on (FORCE_CLK_ON) or release them
+ * (any other clock_state) by setting/clearing SICF_FGC in the wrapper
+ * ioctrl register. Operates on the primary wrapper and, when present,
+ * the secondary wrapper. Each write is read back to post it on the bus.
+ *
+ * Refactored: the original duplicated the identical read/modify/write/
+ * read-back sequence in both branches, differing only in set-vs-clear
+ * of SICF_FGC; hoist the branch into the computed value instead.
+ */
+void
+ai_force_clocks(const si_t *sih, uint clock_state)
+{
+	const si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai, *ai_sec = NULL;
+	volatile uint32 dummy;
+	uint32 ioctrl;
+	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+	if (cores_info->wrapba2[sii->curidx])
+		ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+	/* read-modify-write SICF_FGC on the primary wrapper */
+	ioctrl = R_REG(sii->osh, &ai->ioctrl);
+	if (clock_state == FORCE_CLK_ON) {
+		ioctrl |= SICF_FGC;
+	} else {
+		ioctrl &= ~SICF_FGC;
+	}
+	W_REG(sii->osh, &ai->ioctrl, ioctrl);
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+
+	/* same sequence on the secondary wrapper, when one exists */
+	if (ai_sec) {
+		ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
+		if (clock_state == FORCE_CLK_ON) {
+			ioctrl |= SICF_FGC;
+		} else {
+			ioctrl &= ~SICF_FGC;
+		}
+		W_REG(sii->osh, &ai_sec->ioctrl, ioctrl);
+		dummy = R_REG(sii->osh, &ai_sec->ioctrl);
+		BCM_REFERENCE(dummy);
+	}
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+}
+
+#ifdef DONGLEBUILD
+/*
+ * this is not declared as static const, although that is the right thing to do
+ * reason being if declared as static const, compile/link process would that in
+ * read only section...
+ * currently this code/array is used to identify the registers which are dumped
+ * during trap processing
+ * and usually for the trap buffer, .rodata buffer is reused, so for now just static
+*/
+/* Offsets (within aidmp_t) of every wrapper register captured in a full
+ * trap-time wrapper dump; consumed via ai_get_wrapper_base_addr().
+ */
+static uint32 BCMPOST_TRAP_RODATA(wrapper_offsets_to_dump)[] = {
+	OFFSETOF(aidmp_t, ioctrlset),
+	OFFSETOF(aidmp_t, ioctrlclear),
+	OFFSETOF(aidmp_t, ioctrl),
+	OFFSETOF(aidmp_t, iostatus),
+	OFFSETOF(aidmp_t, ioctrlwidth),
+	OFFSETOF(aidmp_t, iostatuswidth),
+	OFFSETOF(aidmp_t, resetctrl),
+	OFFSETOF(aidmp_t, resetstatus),
+	OFFSETOF(aidmp_t, resetreadid),
+	OFFSETOF(aidmp_t, resetwriteid),
+	OFFSETOF(aidmp_t, errlogctrl),
+	OFFSETOF(aidmp_t, errlogdone),
+	OFFSETOF(aidmp_t, errlogstatus),
+	OFFSETOF(aidmp_t, errlogaddrlo),
+	OFFSETOF(aidmp_t, errlogaddrhi),
+	OFFSETOF(aidmp_t, errlogid),
+	OFFSETOF(aidmp_t, errloguser),
+	OFFSETOF(aidmp_t, errlogflags),
+	OFFSETOF(aidmp_t, intstatus),
+	OFFSETOF(aidmp_t, config),
+	OFFSETOF(aidmp_t, itipoobaout),
+	OFFSETOF(aidmp_t, itipoobbout),
+	OFFSETOF(aidmp_t, itipoobcout),
+	OFFSETOF(aidmp_t, itipoobdout)};
+
+#ifdef ETD
+
+/* This is used for dumping wrapper registers for etd when axierror happens.
+ * This should match with the structure hnd_ext_trap_bp_err_t
+ * (order and count of entries must stay in lockstep with that struct).
+ */
+static uint32 BCMPOST_TRAP_RODATA(etd_wrapper_offsets_axierr)[] = {
+	OFFSETOF(aidmp_t, ioctrl),
+	OFFSETOF(aidmp_t, iostatus),
+	OFFSETOF(aidmp_t, resetctrl),
+	OFFSETOF(aidmp_t, resetstatus),
+	OFFSETOF(aidmp_t, resetreadid),
+	OFFSETOF(aidmp_t, resetwriteid),
+	OFFSETOF(aidmp_t, errlogctrl),
+	OFFSETOF(aidmp_t, errlogdone),
+	OFFSETOF(aidmp_t, errlogstatus),
+	OFFSETOF(aidmp_t, errlogaddrlo),
+	OFFSETOF(aidmp_t, errlogaddrhi),
+	OFFSETOF(aidmp_t, errlogid),
+	OFFSETOF(aidmp_t, errloguser),
+	OFFSETOF(aidmp_t, errlogflags),
+	OFFSETOF(aidmp_t, itipoobaout),
+	OFFSETOF(aidmp_t, itipoobbout),
+	OFFSETOF(aidmp_t, itipoobcout),
+	OFFSETOF(aidmp_t, itipoobdout)};
+#endif /* ETD */
+
+/* wrapper function to access the global array wrapper_offsets_to_dump.
+ * Returns the array's size in BYTES (not element count); BCMRAMFN keeps the
+ * accessor in RAM so the RODATA array can be reached from trap context.
+ */
+static uint32
+BCMRAMFN(ai_get_sizeof_wrapper_offsets_to_dump)(void)
+{
+	return (sizeof(wrapper_offsets_to_dump));
+}
+
+/* Expose the wrapper register-offset table: stores the table's base address
+ * in *offset and returns the number of entries.
+ */
+static uint32
+BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr)(uint32 **offset)
+{
+	*offset = wrapper_offsets_to_dump;
+	return ARRAYSIZE(wrapper_offsets_to_dump);
+}
+
+/* Compute the buffer size (bytes) needed by ai_wrapper_dump_binary:
+ * one (address, value) pair per dumped register, for every wrapper.
+ * Returns 0 when no wrappers were discovered.
+ */
+uint32
+BCMATTACHFN(ai_wrapper_dump_buf_size)(const si_t *sih)
+{
+	const si_info_t *sii = SI_INFO(sih);
+	uint32 wrapper_count = sii->axi_num_wrappers;
+
+	if (wrapper_count == 0) {
+		return 0;
+	}
+
+	/* cnt indicates how many registers, tag_id 0 will say these are address/value */
+	/* address/value pairs */
+	return 2 * (ai_get_sizeof_wrapper_offsets_to_dump() * wrapper_count);
+}
+
+/* Dump one wrapper's registers as (address, value) uint32 pairs into p32.
+ * 'wrap_ba' is the wrapper base address; offsets come from
+ * wrapper_offsets_to_dump. Returns the advanced write cursor.
+ * NOTE(review): the (uint32)addr store truncates on 64-bit hosts;
+ * presumably dongle-only (32-bit) - confirm for NIC builds.
+ */
+static uint32*
+BCMPOSTTRAPFN(ai_wrapper_dump_binary_one)(const si_info_t *sii, uint32 *p32, uint32 wrap_ba)
+{
+	uint i;
+	uint32 *addr;
+	uint32 arr_size;
+	uint32 *offset_base;
+
+	arr_size = ai_get_wrapper_base_addr(&offset_base);
+
+	for (i = 0; i < arr_size; i++) {
+		addr = (uint32 *)(wrap_ba + *(offset_base + i));
+		*p32++ = (uint32)addr;
+		*p32++ = R_REG(sii->osh, addr);
+	}
+	return p32;
+}
+
+#if defined(ETD)
+static uint32
+BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr_etd_axierr)(uint32 **offset)
+{
+ uint32 arr_size = ARRAYSIZE(etd_wrapper_offsets_axierr);
+
+ *offset = &etd_wrapper_offsets_axierr[0];
+ return arr_size;
+}
+
+/* Dump the register contents of the wrapper that logged the most recent AXI
+ * error into 'p' (layout matches hnd_ext_trap_bp_err_t), and report the last
+ * error code, core id and wrapper base via the out-pointers. When timeout
+ * support is compiled out, the out-values are zeroed. Always returns 0.
+ */
+uint32
+BCMPOSTTRAPFN(ai_wrapper_dump_last_timeout)(const si_t *sih, uint32 *error, uint32 *core,
+	uint32 *ba, uchar *p)
+{
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+	uint32 *p32;
+	uint32 wrap_ba = last_axi_error_wrap;
+	uint i;
+	uint32 *addr;
+
+	const si_info_t *sii = SI_INFO(sih);
+
+	/* only dump when an error was actually recorded */
+	if (last_axi_error != AXI_WRAP_STS_NONE)
+	{
+		if (wrap_ba)
+		{
+			p32 = (uint32 *)p;
+			uint32 arr_size;
+			uint32 *offset_base;
+
+			arr_size = ai_get_wrapper_base_addr_etd_axierr(&offset_base);
+			for (i = 0; i < arr_size; i++) {
+				addr = (uint32 *)(wrap_ba + *(offset_base + i));
+				*p32++ = R_REG(sii->osh, addr);
+			}
+		}
+		*error = last_axi_error;
+		*core = last_axi_error_core;
+		*ba = wrap_ba;
+	}
+#else
+	*error = 0;
+	*core = 0;
+	*ba = 0;
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+	return 0;
+}
+#endif /* ETD */
+
+/* Dump every discovered wrapper's registers as (address, value) pairs into
+ * 'p'; caller sizes the buffer with ai_wrapper_dump_buf_size(). Returns 0.
+ */
+uint32
+BCMPOSTTRAPFN(ai_wrapper_dump_binary)(const si_t *sih, uchar *p)
+{
+	const si_info_t *sii = SI_INFO(sih);
+	uint32 *cursor = (uint32 *)p;
+	uint32 idx;
+
+	for (idx = 0; idx < sii->axi_num_wrappers; idx++) {
+		cursor = ai_wrapper_dump_binary_one(sii, cursor,
+			sii->axi_wrapper[idx].wrapper_addr);
+	}
+	return 0;
+}
+
+bool
+BCMPOSTTRAPFN(ai_check_enable_backplane_log)(const si_t *sih)
+{
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+ if (g_disable_backplane_logs) {
+ return FALSE;
+ }
+ else {
+ return TRUE;
+ }
+#else /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+ return FALSE;
+#endif /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+}
+#endif /* DONGLEBUILD */
diff --git a/bcmdhd.101.10.361.x/bcm_app_utils.c b/bcmdhd.101.10.361.x/bcm_app_utils.c
new file mode 100755
index 0000000..62d0507
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcm_app_utils.c
@@ -0,0 +1,1276 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else /* BCMDRIVER */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> /* For wlexe/Makefile.wlm_dll */
+#endif
+
+#include <bcmutils.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+
+#ifndef BCMDRIVER
+/* Take an array of measurements representing a single channel over time and
+   return a summary. Currently implemented as a simple average (optionally
+   expressed as a percentage of total duration) but could evolve into more
+   complex algorithms. Seconds with zero duration are skipped. Returns 'avg'.
+*/
+cca_congest_channel_req_t *
+cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent)
+{
+	int sec;
+	cca_congest_t sum;
+
+	sum.duration = 0;
+	sum.congest_ibss = 0;
+	sum.congest_obss = 0;
+	sum.interference = 0;
+	avg->num_secs = 0;
+
+	/* accumulate only seconds that carry real measurement time */
+	for (sec = 0; sec < input->num_secs; sec++) {
+		if (!input->secs[sec].duration)
+			continue;
+		sum.duration += input->secs[sec].duration;
+		sum.congest_ibss += input->secs[sec].congest_ibss;
+		sum.congest_obss += input->secs[sec].congest_obss;
+		sum.interference += input->secs[sec].interference;
+		avg->num_secs++;
+	}
+	avg->chanspec = input->chanspec;
+
+	if (!avg->num_secs || !sum.duration)
+		return (avg);
+
+	avg->secs[0].duration = sum.duration / avg->num_secs;
+	if (percent) {
+		/* congestion figures as a percentage of the total duration */
+		avg->secs[0].congest_ibss = sum.congest_ibss * 100 / sum.duration;
+		avg->secs[0].congest_obss = sum.congest_obss * 100 / sum.duration;
+		avg->secs[0].interference = sum.interference * 100 / sum.duration;
+	} else {
+		/* plain per-second averages */
+		avg->secs[0].congest_ibss = sum.congest_ibss / avg->num_secs;
+		avg->secs[0].congest_obss = sum.congest_obss / avg->num_secs;
+		avg->secs[0].interference = sum.interference / avg->num_secs;
+	}
+
+	return (avg);
+}
+
+/* Count the set bits in 'bitmap' (channels still under consideration) into
+ * *left, recording the position of the last set bit seen in *bit_pos.
+ * *bit_pos is untouched when no bit is set.
+ */
+static void
+cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos)
+{
+	int idx;
+
+	*left = 0;
+	for (idx = 0; idx < num_bits; idx++) {
+		if (!isset(bitmap, idx))
+			continue;
+		(*left)++;
+		*bit_pos = idx;
+	}
+}
+
+/* Derive the 20MHz primary channel number from a chanspec. For wide
+ * channels, walk down to the lower band edge, then back up to the lowest
+ * 20MHz primary and step by the sideband index.
+ */
+static uint8
+spec_to_chan(chanspec_t chspec)
+{
+	uint8 center_ch, lower_edge, primary20, sb;
+
+	center_ch = CHSPEC_CHANNEL(chspec);
+
+	/* 20MHz channel: the center IS the primary */
+	if (CHSPEC_IS20(chspec))
+		return center_ch;
+
+	/* the lower edge of the wide channel is half the bw from
+	 * the center channel.
+	 */
+	if (CHSPEC_IS40(chspec)) {
+		lower_edge = center_ch - CH_20MHZ_APART;
+	} else {
+		/* must be 80MHz (until we support more) */
+		ASSERT(CHSPEC_IS80(chspec));
+		lower_edge = center_ch - CH_40MHZ_APART;
+	}
+
+	/* find the channel number of the lowest 20MHz primary channel */
+	primary20 = lower_edge + CH_10MHZ_APART;
+
+	/* select the actual subband */
+	sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT;
+	primary20 = primary20 + sb * CH_20MHZ_APART;
+
+	return primary20;
+}
+
+/*
+   Take an array of measumrements representing summaries of different channels.
+   Return a recomended channel.
+   Interference is evil, get rid of that first.
+   Then hunt for lowest Other bss traffic.
+   Don't forget that channels with low duration times may not have accurate readings.
+   For the moment, do not overwrite input array.
+   Returns 0 and sets *answer on success, or a CCA_ERRNO_* / BCME_NOMEM code.
+*/
+int
+cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer)
+{
+	uint8 *bitmap = NULL;	/* 38 Max channels needs 5 bytes = 40 */
+	int i, left, winner, ret_val = 0;
+	uint32 min_obss = 1 << 30;
+	uint bitmap_sz;
+
+	bitmap_sz = CEIL(num_chans, NBBY);
+	bitmap = (uint8 *)malloc(bitmap_sz);
+	if (bitmap == NULL) {
+		printf("unable to allocate memory\n");
+		return BCME_NOMEM;
+	}
+
+	memset(bitmap, 0, bitmap_sz);
+	/* Initially, all channels are up for consideration */
+	for (i = 0; i < num_chans; i++) {
+		if (input[i]->chanspec)
+			setbit(bitmap, i);
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_TOO_FEW;
+		goto f_exit;
+	}
+
+	/* Filter for 2.4 GHz Band */
+	if (flags & CCA_FLAG_2G_ONLY) {
+		for (i = 0; i < num_chans; i++) {
+			if (!CHSPEC_IS2G(input[i]->chanspec))
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_BAND;
+		goto f_exit;
+	}
+
+	/* Filter for 5 GHz Band */
+	if (flags & CCA_FLAG_5G_ONLY) {
+		for (i = 0; i < num_chans; i++) {
+			if (!CHSPEC_IS5G(input[i]->chanspec))
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_BAND;
+		goto f_exit;
+	}
+
+	/* Filter for Duration */
+	if (!(flags & CCA_FLAG_IGNORE_DURATION)) {
+		for (i = 0; i < num_chans; i++) {
+			if (input[i]->secs[0].duration < CCA_THRESH_MILLI)
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_DURATION;
+		goto f_exit;
+	}
+
+	/* Filter for 1 6 11 on 2.4 Band.
+	 * BUG FIX: tmp_channel and is2g were previously computed once, BEFORE
+	 * the loop, from whatever stale index the preceding cca_info() call
+	 * left in 'i', so the filter tested one arbitrary channel against
+	 * every bitmap slot. Evaluate them per-channel inside the loop.
+	 */
+	if (flags & CCA_FLAGS_PREFER_1_6_11) {
+		for (i = 0; i < num_chans; i++) {
+			int tmp_channel = spec_to_chan(input[i]->chanspec);
+			int is2g = CHSPEC_IS2G(input[i]->chanspec);
+			if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11)
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_PREF_CHAN;
+		goto f_exit;
+	}
+
+	/* Toss high interference interference */
+	if (!(flags & CCA_FLAG_IGNORE_INTERFER)) {
+		for (i = 0; i < num_chans; i++) {
+			if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE)
+				clrbit(bitmap, i);
+		}
+		cca_info(bitmap, num_chans, &left, &i);
+		if (!left) {
+			ret_val = CCA_ERRNO_INTERFER;
+			goto f_exit;
+		}
+	}
+
+	/* Now find lowest obss among the surviving channels */
+	winner = 0;
+	for (i = 0; i < num_chans; i++) {
+		if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) {
+			winner = i;
+			min_obss = input[i]->secs[0].congest_obss;
+		}
+	}
+	*answer = input[winner]->chanspec;
+	f_exit:
+	free(bitmap);	/* free the allocated memory for bitmap */
+	return ret_val;
+}
+#endif /* !BCMDRIVER */
+
+/* offset of cntmember by sizeof(uint32) from the first cnt variable, txframe. */
+#define IDX_IN_WL_CNT_VER_6_T(cntmember)	\
+	((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32))
+
+/* same uint32-index calculation for the v7 counter layout */
+#define IDX_IN_WL_CNT_VER_7_T(cntmember)	\
+	((OFFSETOF(wl_cnt_ver_7_t, cntmember) - OFFSETOF(wl_cnt_ver_7_t, txframe)) / sizeof(uint32))
+
+/* same uint32-index calculation for the v11 counter layout */
+#define IDX_IN_WL_CNT_VER_11_T(cntmember)	\
+	((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe))	\
+	/ sizeof(uint32))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_6_T	\
+	((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables. */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T	\
+	(NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_7_T	\
+	((sizeof(wl_cnt_ver_7_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_11_T	\
+	((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude 64 macstat cnt variables. */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T	\
+	((sizeof(wl_cnt_wlc_t)) / sizeof(uint32))
+
+/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t.
+ * Entry i holds the uint32 index (relative to txframe) in wl_cnt_ver_6_t of
+ * the counter that lands in slot i of wl_cnt_wlc_t; consumed by
+ * wl_copy_wlccnt(). Entry order must track the field order of wl_cnt_wlc_t.
+ */
+static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = {
+ IDX_IN_WL_CNT_VER_6_T(txframe),
+ IDX_IN_WL_CNT_VER_6_T(txbyte),
+ IDX_IN_WL_CNT_VER_6_T(txretrans),
+ IDX_IN_WL_CNT_VER_6_T(txerror),
+ IDX_IN_WL_CNT_VER_6_T(txctl),
+ IDX_IN_WL_CNT_VER_6_T(txprshort),
+ IDX_IN_WL_CNT_VER_6_T(txserr),
+ IDX_IN_WL_CNT_VER_6_T(txnobuf),
+ IDX_IN_WL_CNT_VER_6_T(txnoassoc),
+ IDX_IN_WL_CNT_VER_6_T(txrunt),
+ IDX_IN_WL_CNT_VER_6_T(txchit),
+ IDX_IN_WL_CNT_VER_6_T(txcmiss),
+ IDX_IN_WL_CNT_VER_6_T(txuflo),
+ IDX_IN_WL_CNT_VER_6_T(txphyerr),
+ IDX_IN_WL_CNT_VER_6_T(txphycrs),
+ IDX_IN_WL_CNT_VER_6_T(rxframe),
+ IDX_IN_WL_CNT_VER_6_T(rxbyte),
+ IDX_IN_WL_CNT_VER_6_T(rxerror),
+ IDX_IN_WL_CNT_VER_6_T(rxctl),
+ IDX_IN_WL_CNT_VER_6_T(rxnobuf),
+ IDX_IN_WL_CNT_VER_6_T(rxnondata),
+ IDX_IN_WL_CNT_VER_6_T(rxbadds),
+ IDX_IN_WL_CNT_VER_6_T(rxbadcm),
+ IDX_IN_WL_CNT_VER_6_T(rxfragerr),
+ IDX_IN_WL_CNT_VER_6_T(rxrunt),
+ IDX_IN_WL_CNT_VER_6_T(rxgiant),
+ IDX_IN_WL_CNT_VER_6_T(rxnoscb),
+ IDX_IN_WL_CNT_VER_6_T(rxbadproto),
+ IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac),
+ IDX_IN_WL_CNT_VER_6_T(rxbadda),
+ IDX_IN_WL_CNT_VER_6_T(rxfilter),
+ IDX_IN_WL_CNT_VER_6_T(rxoflo),
+ /* rxuflo is an array of NFIFO entries; +1..+5 address its later elements */
+ IDX_IN_WL_CNT_VER_6_T(rxuflo),
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5,
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off),
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off),
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off),
+ IDX_IN_WL_CNT_VER_6_T(dmade),
+ IDX_IN_WL_CNT_VER_6_T(dmada),
+ IDX_IN_WL_CNT_VER_6_T(dmape),
+ IDX_IN_WL_CNT_VER_6_T(reset),
+ IDX_IN_WL_CNT_VER_6_T(tbtt),
+ IDX_IN_WL_CNT_VER_6_T(txdmawar),
+ IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail),
+ IDX_IN_WL_CNT_VER_6_T(txfrag),
+ IDX_IN_WL_CNT_VER_6_T(txmulti),
+ IDX_IN_WL_CNT_VER_6_T(txfail),
+ IDX_IN_WL_CNT_VER_6_T(txretry),
+ IDX_IN_WL_CNT_VER_6_T(txretrie),
+ IDX_IN_WL_CNT_VER_6_T(rxdup),
+ IDX_IN_WL_CNT_VER_6_T(txrts),
+ IDX_IN_WL_CNT_VER_6_T(txnocts),
+ IDX_IN_WL_CNT_VER_6_T(txnoack),
+ IDX_IN_WL_CNT_VER_6_T(rxfrag),
+ IDX_IN_WL_CNT_VER_6_T(rxmulti),
+ IDX_IN_WL_CNT_VER_6_T(rxcrc),
+ IDX_IN_WL_CNT_VER_6_T(txfrmsnt),
+ IDX_IN_WL_CNT_VER_6_T(rxundec),
+ IDX_IN_WL_CNT_VER_6_T(tkipmicfaill),
+ IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr),
+ IDX_IN_WL_CNT_VER_6_T(tkipreplay),
+ IDX_IN_WL_CNT_VER_6_T(ccmpfmterr),
+ IDX_IN_WL_CNT_VER_6_T(ccmpreplay),
+ IDX_IN_WL_CNT_VER_6_T(ccmpundec),
+ IDX_IN_WL_CNT_VER_6_T(fourwayfail),
+ IDX_IN_WL_CNT_VER_6_T(wepundec),
+ IDX_IN_WL_CNT_VER_6_T(wepicverr),
+ IDX_IN_WL_CNT_VER_6_T(decsuccess),
+ IDX_IN_WL_CNT_VER_6_T(tkipicverr),
+ IDX_IN_WL_CNT_VER_6_T(wepexcluded),
+ IDX_IN_WL_CNT_VER_6_T(txchanrej),
+ IDX_IN_WL_CNT_VER_6_T(psmwds),
+ IDX_IN_WL_CNT_VER_6_T(phywatchdog),
+ IDX_IN_WL_CNT_VER_6_T(prq_entries_handled),
+ IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries),
+ IDX_IN_WL_CNT_VER_6_T(prq_bad_entries),
+ IDX_IN_WL_CNT_VER_6_T(atim_suppress_count),
+ IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready),
+ IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done),
+ IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc),
+ IDX_IN_WL_CNT_VER_6_T(rx1mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx2mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx5mbps5),
+ IDX_IN_WL_CNT_VER_6_T(rx6mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx9mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx11mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx12mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx18mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx24mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx36mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx48mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx54mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx108mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx162mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx216mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx270mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx324mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx378mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx432mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx486mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx540mbps),
+ IDX_IN_WL_CNT_VER_6_T(rfdisable),
+ IDX_IN_WL_CNT_VER_6_T(txexptime),
+ IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi),
+ IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi),
+ IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc),
+ IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc),
+ IDX_IN_WL_CNT_VER_6_T(rxundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst)
+};
+
+/* Sentinel: the wl_cnt_wlc_t slot has no source counter in this version. */
+#define INVALID_IDX ((uint8)(-1))
+
+/* Index conversion table from wl_cnt_ver_7_t to wl_cnt_wlc_t.
+ * Same layout contract as wlcntver6t_to_wlcntwlct; slots that ver-7 cannot
+ * supply are marked INVALID_IDX and skipped by wl_copy_wlccnt().
+ */
+static const uint8 wlcntver7t_to_wlcntwlct[] = {
+ IDX_IN_WL_CNT_VER_7_T(txframe),
+ IDX_IN_WL_CNT_VER_7_T(txbyte),
+ IDX_IN_WL_CNT_VER_7_T(txretrans),
+ IDX_IN_WL_CNT_VER_7_T(txerror),
+ IDX_IN_WL_CNT_VER_7_T(txctl),
+ IDX_IN_WL_CNT_VER_7_T(txprshort),
+ IDX_IN_WL_CNT_VER_7_T(txserr),
+ IDX_IN_WL_CNT_VER_7_T(txnobuf),
+ IDX_IN_WL_CNT_VER_7_T(txnoassoc),
+ IDX_IN_WL_CNT_VER_7_T(txrunt),
+ IDX_IN_WL_CNT_VER_7_T(txchit),
+ IDX_IN_WL_CNT_VER_7_T(txcmiss),
+ IDX_IN_WL_CNT_VER_7_T(txuflo),
+ IDX_IN_WL_CNT_VER_7_T(txphyerr),
+ IDX_IN_WL_CNT_VER_7_T(txphycrs),
+ IDX_IN_WL_CNT_VER_7_T(rxframe),
+ IDX_IN_WL_CNT_VER_7_T(rxbyte),
+ IDX_IN_WL_CNT_VER_7_T(rxerror),
+ IDX_IN_WL_CNT_VER_7_T(rxctl),
+ IDX_IN_WL_CNT_VER_7_T(rxnobuf),
+ IDX_IN_WL_CNT_VER_7_T(rxnondata),
+ IDX_IN_WL_CNT_VER_7_T(rxbadds),
+ IDX_IN_WL_CNT_VER_7_T(rxbadcm),
+ IDX_IN_WL_CNT_VER_7_T(rxfragerr),
+ IDX_IN_WL_CNT_VER_7_T(rxrunt),
+ IDX_IN_WL_CNT_VER_7_T(rxgiant),
+ IDX_IN_WL_CNT_VER_7_T(rxnoscb),
+ IDX_IN_WL_CNT_VER_7_T(rxbadproto),
+ IDX_IN_WL_CNT_VER_7_T(rxbadsrcmac),
+ IDX_IN_WL_CNT_VER_7_T(rxbadda),
+ IDX_IN_WL_CNT_VER_7_T(rxfilter),
+ IDX_IN_WL_CNT_VER_7_T(rxoflo),
+ /* rxuflo is an array; +1..+5 address its later elements */
+ IDX_IN_WL_CNT_VER_7_T(rxuflo),
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 1,
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 2,
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 3,
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 4,
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 5,
+ IDX_IN_WL_CNT_VER_7_T(d11cnt_txrts_off),
+ IDX_IN_WL_CNT_VER_7_T(d11cnt_rxcrc_off),
+ IDX_IN_WL_CNT_VER_7_T(d11cnt_txnocts_off),
+ IDX_IN_WL_CNT_VER_7_T(dmade),
+ IDX_IN_WL_CNT_VER_7_T(dmada),
+ IDX_IN_WL_CNT_VER_7_T(dmape),
+ IDX_IN_WL_CNT_VER_7_T(reset),
+ IDX_IN_WL_CNT_VER_7_T(tbtt),
+ IDX_IN_WL_CNT_VER_7_T(txdmawar),
+ IDX_IN_WL_CNT_VER_7_T(pkt_callback_reg_fail),
+ IDX_IN_WL_CNT_VER_7_T(txfrag),
+ IDX_IN_WL_CNT_VER_7_T(txmulti),
+ IDX_IN_WL_CNT_VER_7_T(txfail),
+ IDX_IN_WL_CNT_VER_7_T(txretry),
+ IDX_IN_WL_CNT_VER_7_T(txretrie),
+ IDX_IN_WL_CNT_VER_7_T(rxdup),
+ IDX_IN_WL_CNT_VER_7_T(txrts),
+ IDX_IN_WL_CNT_VER_7_T(txnocts),
+ IDX_IN_WL_CNT_VER_7_T(txnoack),
+ IDX_IN_WL_CNT_VER_7_T(rxfrag),
+ IDX_IN_WL_CNT_VER_7_T(rxmulti),
+ IDX_IN_WL_CNT_VER_7_T(rxcrc),
+ IDX_IN_WL_CNT_VER_7_T(txfrmsnt),
+ IDX_IN_WL_CNT_VER_7_T(rxundec),
+ IDX_IN_WL_CNT_VER_7_T(tkipmicfaill),
+ IDX_IN_WL_CNT_VER_7_T(tkipcntrmsr),
+ IDX_IN_WL_CNT_VER_7_T(tkipreplay),
+ IDX_IN_WL_CNT_VER_7_T(ccmpfmterr),
+ IDX_IN_WL_CNT_VER_7_T(ccmpreplay),
+ IDX_IN_WL_CNT_VER_7_T(ccmpundec),
+ IDX_IN_WL_CNT_VER_7_T(fourwayfail),
+ IDX_IN_WL_CNT_VER_7_T(wepundec),
+ IDX_IN_WL_CNT_VER_7_T(wepicverr),
+ IDX_IN_WL_CNT_VER_7_T(decsuccess),
+ IDX_IN_WL_CNT_VER_7_T(tkipicverr),
+ IDX_IN_WL_CNT_VER_7_T(wepexcluded),
+ IDX_IN_WL_CNT_VER_7_T(txchanrej),
+ IDX_IN_WL_CNT_VER_7_T(psmwds),
+ IDX_IN_WL_CNT_VER_7_T(phywatchdog),
+ IDX_IN_WL_CNT_VER_7_T(prq_entries_handled),
+ IDX_IN_WL_CNT_VER_7_T(prq_undirected_entries),
+ IDX_IN_WL_CNT_VER_7_T(prq_bad_entries),
+ IDX_IN_WL_CNT_VER_7_T(atim_suppress_count),
+ IDX_IN_WL_CNT_VER_7_T(bcn_template_not_ready),
+ IDX_IN_WL_CNT_VER_7_T(bcn_template_not_ready_done),
+ IDX_IN_WL_CNT_VER_7_T(late_tbtt_dpc),
+ IDX_IN_WL_CNT_VER_7_T(rx1mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx2mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx5mbps5),
+ IDX_IN_WL_CNT_VER_7_T(rx6mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx9mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx11mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx12mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx18mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx24mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx36mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx48mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx54mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx108mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx162mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx216mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx270mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx324mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx378mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx432mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx486mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx540mbps),
+ IDX_IN_WL_CNT_VER_7_T(rfdisable),
+ IDX_IN_WL_CNT_VER_7_T(txexptime),
+ IDX_IN_WL_CNT_VER_7_T(txmpdu_sgi),
+ IDX_IN_WL_CNT_VER_7_T(rxmpdu_sgi),
+ IDX_IN_WL_CNT_VER_7_T(txmpdu_stbc),
+ IDX_IN_WL_CNT_VER_7_T(rxmpdu_stbc),
+ IDX_IN_WL_CNT_VER_7_T(rxundec_mcst),
+ IDX_IN_WL_CNT_VER_7_T(tkipmicfaill_mcst),
+ IDX_IN_WL_CNT_VER_7_T(tkipcntrmsr_mcst),
+ IDX_IN_WL_CNT_VER_7_T(tkipreplay_mcst),
+ IDX_IN_WL_CNT_VER_7_T(ccmpfmterr_mcst),
+ IDX_IN_WL_CNT_VER_7_T(ccmpreplay_mcst),
+ IDX_IN_WL_CNT_VER_7_T(ccmpundec_mcst),
+ IDX_IN_WL_CNT_VER_7_T(fourwayfail_mcst),
+ IDX_IN_WL_CNT_VER_7_T(wepundec_mcst),
+ IDX_IN_WL_CNT_VER_7_T(wepicverr_mcst),
+ IDX_IN_WL_CNT_VER_7_T(decsuccess_mcst),
+ IDX_IN_WL_CNT_VER_7_T(tkipicverr_mcst),
+ IDX_IN_WL_CNT_VER_7_T(wepexcluded_mcst),
+ IDX_IN_WL_CNT_VER_7_T(dma_hang),
+ /* wl_cnt_wlc_t fields with no ver-7 equivalent: left unmapped */
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ IDX_IN_WL_CNT_VER_7_T(rxrtry)
+};
+
+/* Max wl_cnt_wlc_t fields including rxrtry */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_7_T \
+ (sizeof(wlcntver7t_to_wlcntwlct) / sizeof(uint8))
+
+/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t.
+ * Entry i holds the uint32 index (relative to txframe) in wl_cnt_ver_11_t of
+ * the counter that fills slot i of wl_cnt_wlc_t; INVALID_IDX marks slots
+ * that ver-11 does not provide. Consumed by wl_copy_wlccnt().
+ */
+static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = {
+ IDX_IN_WL_CNT_VER_11_T(txframe),
+ IDX_IN_WL_CNT_VER_11_T(txbyte),
+ IDX_IN_WL_CNT_VER_11_T(txretrans),
+ IDX_IN_WL_CNT_VER_11_T(txerror),
+ IDX_IN_WL_CNT_VER_11_T(txctl),
+ IDX_IN_WL_CNT_VER_11_T(txprshort),
+ IDX_IN_WL_CNT_VER_11_T(txserr),
+ IDX_IN_WL_CNT_VER_11_T(txnobuf),
+ IDX_IN_WL_CNT_VER_11_T(txnoassoc),
+ IDX_IN_WL_CNT_VER_11_T(txrunt),
+ IDX_IN_WL_CNT_VER_11_T(txchit),
+ IDX_IN_WL_CNT_VER_11_T(txcmiss),
+ IDX_IN_WL_CNT_VER_11_T(txuflo),
+ IDX_IN_WL_CNT_VER_11_T(txphyerr),
+ IDX_IN_WL_CNT_VER_11_T(txphycrs),
+ IDX_IN_WL_CNT_VER_11_T(rxframe),
+ IDX_IN_WL_CNT_VER_11_T(rxbyte),
+ IDX_IN_WL_CNT_VER_11_T(rxerror),
+ IDX_IN_WL_CNT_VER_11_T(rxctl),
+ IDX_IN_WL_CNT_VER_11_T(rxnobuf),
+ IDX_IN_WL_CNT_VER_11_T(rxnondata),
+ IDX_IN_WL_CNT_VER_11_T(rxbadds),
+ IDX_IN_WL_CNT_VER_11_T(rxbadcm),
+ IDX_IN_WL_CNT_VER_11_T(rxfragerr),
+ IDX_IN_WL_CNT_VER_11_T(rxrunt),
+ IDX_IN_WL_CNT_VER_11_T(rxgiant),
+ IDX_IN_WL_CNT_VER_11_T(rxnoscb),
+ IDX_IN_WL_CNT_VER_11_T(rxbadproto),
+ IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac),
+ IDX_IN_WL_CNT_VER_11_T(rxbadda),
+ IDX_IN_WL_CNT_VER_11_T(rxfilter),
+ IDX_IN_WL_CNT_VER_11_T(rxoflo),
+ /* rxuflo is an array; +1..+5 address its later elements */
+ IDX_IN_WL_CNT_VER_11_T(rxuflo),
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5,
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off),
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off),
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off),
+ IDX_IN_WL_CNT_VER_11_T(dmade),
+ IDX_IN_WL_CNT_VER_11_T(dmada),
+ IDX_IN_WL_CNT_VER_11_T(dmape),
+ IDX_IN_WL_CNT_VER_11_T(reset),
+ IDX_IN_WL_CNT_VER_11_T(tbtt),
+ IDX_IN_WL_CNT_VER_11_T(txdmawar),
+ IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail),
+ IDX_IN_WL_CNT_VER_11_T(txfrag),
+ IDX_IN_WL_CNT_VER_11_T(txmulti),
+ IDX_IN_WL_CNT_VER_11_T(txfail),
+ IDX_IN_WL_CNT_VER_11_T(txretry),
+ IDX_IN_WL_CNT_VER_11_T(txretrie),
+ IDX_IN_WL_CNT_VER_11_T(rxdup),
+ IDX_IN_WL_CNT_VER_11_T(txrts),
+ IDX_IN_WL_CNT_VER_11_T(txnocts),
+ IDX_IN_WL_CNT_VER_11_T(txnoack),
+ IDX_IN_WL_CNT_VER_11_T(rxfrag),
+ IDX_IN_WL_CNT_VER_11_T(rxmulti),
+ IDX_IN_WL_CNT_VER_11_T(rxcrc),
+ IDX_IN_WL_CNT_VER_11_T(txfrmsnt),
+ IDX_IN_WL_CNT_VER_11_T(rxundec),
+ IDX_IN_WL_CNT_VER_11_T(tkipmicfaill),
+ IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr),
+ IDX_IN_WL_CNT_VER_11_T(tkipreplay),
+ IDX_IN_WL_CNT_VER_11_T(ccmpfmterr),
+ IDX_IN_WL_CNT_VER_11_T(ccmpreplay),
+ IDX_IN_WL_CNT_VER_11_T(ccmpundec),
+ IDX_IN_WL_CNT_VER_11_T(fourwayfail),
+ IDX_IN_WL_CNT_VER_11_T(wepundec),
+ IDX_IN_WL_CNT_VER_11_T(wepicverr),
+ IDX_IN_WL_CNT_VER_11_T(decsuccess),
+ IDX_IN_WL_CNT_VER_11_T(tkipicverr),
+ IDX_IN_WL_CNT_VER_11_T(wepexcluded),
+ IDX_IN_WL_CNT_VER_11_T(txchanrej),
+ IDX_IN_WL_CNT_VER_11_T(psmwds),
+ IDX_IN_WL_CNT_VER_11_T(phywatchdog),
+ IDX_IN_WL_CNT_VER_11_T(prq_entries_handled),
+ IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries),
+ IDX_IN_WL_CNT_VER_11_T(prq_bad_entries),
+ IDX_IN_WL_CNT_VER_11_T(atim_suppress_count),
+ IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready),
+ IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done),
+ IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc),
+ IDX_IN_WL_CNT_VER_11_T(rx1mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx2mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx5mbps5),
+ IDX_IN_WL_CNT_VER_11_T(rx6mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx9mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx11mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx12mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx18mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx24mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx36mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx48mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx54mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx108mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx162mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx216mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx270mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx324mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx378mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx432mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx486mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx540mbps),
+ IDX_IN_WL_CNT_VER_11_T(rfdisable),
+ IDX_IN_WL_CNT_VER_11_T(txexptime),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc),
+ IDX_IN_WL_CNT_VER_11_T(rxundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst),
+ IDX_IN_WL_CNT_VER_11_T(dma_hang),
+ IDX_IN_WL_CNT_VER_11_T(reinit),
+ IDX_IN_WL_CNT_VER_11_T(pstatxucast),
+ IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc),
+ IDX_IN_WL_CNT_VER_11_T(pstarxucast),
+ IDX_IN_WL_CNT_VER_11_T(pstarxbcmc),
+ IDX_IN_WL_CNT_VER_11_T(pstatxbcmc),
+ IDX_IN_WL_CNT_VER_11_T(cso_passthrough),
+ IDX_IN_WL_CNT_VER_11_T(cso_normal),
+ IDX_IN_WL_CNT_VER_11_T(chained),
+ IDX_IN_WL_CNT_VER_11_T(chainedsz1),
+ IDX_IN_WL_CNT_VER_11_T(unchained),
+ IDX_IN_WL_CNT_VER_11_T(maxchainsz),
+ IDX_IN_WL_CNT_VER_11_T(currchainsz),
+ IDX_IN_WL_CNT_VER_11_T(pciereset),
+ IDX_IN_WL_CNT_VER_11_T(cfgrestore),
+ /* reinitreason is an array; +1..+7 address its later elements */
+ IDX_IN_WL_CNT_VER_11_T(reinitreason),
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 2,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7,
+ IDX_IN_WL_CNT_VER_11_T(rxrtry),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu),
+ IDX_IN_WL_CNT_VER_11_T(txbar),
+ IDX_IN_WL_CNT_VER_11_T(rxbar),
+ IDX_IN_WL_CNT_VER_11_T(txpspoll),
+ IDX_IN_WL_CNT_VER_11_T(rxpspoll),
+ IDX_IN_WL_CNT_VER_11_T(txnull),
+ IDX_IN_WL_CNT_VER_11_T(rxnull),
+ IDX_IN_WL_CNT_VER_11_T(txqosnull),
+ IDX_IN_WL_CNT_VER_11_T(rxqosnull),
+ IDX_IN_WL_CNT_VER_11_T(txassocreq),
+ IDX_IN_WL_CNT_VER_11_T(rxassocreq),
+ IDX_IN_WL_CNT_VER_11_T(txreassocreq),
+ IDX_IN_WL_CNT_VER_11_T(rxreassocreq),
+ IDX_IN_WL_CNT_VER_11_T(txdisassoc),
+ IDX_IN_WL_CNT_VER_11_T(rxdisassoc),
+ IDX_IN_WL_CNT_VER_11_T(txassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(rxassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(txreassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(rxreassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(txauth),
+ IDX_IN_WL_CNT_VER_11_T(rxauth),
+ IDX_IN_WL_CNT_VER_11_T(txdeauth),
+ IDX_IN_WL_CNT_VER_11_T(rxdeauth),
+ IDX_IN_WL_CNT_VER_11_T(txprobereq),
+ IDX_IN_WL_CNT_VER_11_T(rxprobereq),
+ IDX_IN_WL_CNT_VER_11_T(txprobersp),
+ IDX_IN_WL_CNT_VER_11_T(rxprobersp),
+ IDX_IN_WL_CNT_VER_11_T(txaction),
+ IDX_IN_WL_CNT_VER_11_T(rxaction),
+ IDX_IN_WL_CNT_VER_11_T(ampdu_wds),
+ IDX_IN_WL_CNT_VER_11_T(txlost),
+ IDX_IN_WL_CNT_VER_11_T(txdatamcast),
+ IDX_IN_WL_CNT_VER_11_T(txdatabcast),
+ /* interleaved wl_cnt_wlc_t fields with no ver-11 source stay unmapped */
+ INVALID_IDX,
+ IDX_IN_WL_CNT_VER_11_T(rxback),
+ IDX_IN_WL_CNT_VER_11_T(txback),
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ IDX_IN_WL_CNT_VER_11_T(txbcast),
+ IDX_IN_WL_CNT_VER_11_T(txdropped),
+ IDX_IN_WL_CNT_VER_11_T(rxbcast),
+ IDX_IN_WL_CNT_VER_11_T(rxdropped)
+};
+
+/* Index conversion table from wl_cnt_ver_11_t to
+ * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t
+ * (the two layouts share the same field order here, so a single table
+ * serves both; the caller picks the xtlv id based on corerev).
+ */
+static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_11_T(txallfrm),
+ IDX_IN_WL_CNT_VER_11_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txackfrm),
+ IDX_IN_WL_CNT_VER_11_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_11_T(txbcnfrm),
+ /* txfunfl is an array; +1..+5 address its later elements */
+ IDX_IN_WL_CNT_VER_11_T(txfunfl),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_11_T(txfbw),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu),
+ IDX_IN_WL_CNT_VER_11_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_11_T(txphyerror),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_11_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_11_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_11_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_11_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxstrt),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxackucast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_11_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_11_T(bcntxcancl),
+ IDX_IN_WL_CNT_VER_11_T(rxnodelim),
+ IDX_IN_WL_CNT_VER_11_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_11_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_11_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_11_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_11_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_11_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_11_T(rxnack),
+ IDX_IN_WL_CNT_VER_11_T(frmscons),
+ IDX_IN_WL_CNT_VER_11_T(txnack),
+ IDX_IN_WL_CNT_VER_11_T(rxback),
+ IDX_IN_WL_CNT_VER_11_T(txback),
+ IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_11_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
+};
+
+/* For mcst offsets that were not used. (2 Pads) */
+#define INVALID_MCST_IDX ((uint8)(-1))
+/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t.
+ * INVALID_MCST_IDX slots correspond to le10 fields that ver-11 never
+ * carried; wl_copy_macstat_upto_ver10() fills them with INVALID_CNT_VAL.
+ */
+static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_11_T(txallfrm),
+ IDX_IN_WL_CNT_VER_11_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txackfrm),
+ IDX_IN_WL_CNT_VER_11_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_11_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_11_T(txfbw),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_11_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_11_T(txphyerror),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_11_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_11_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_11_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_11_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxstrt),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxackucast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_11_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_11_T(bcntxcancl),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_11_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_11_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_11_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_11_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_11_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_11_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_11_T(rxnack),
+ IDX_IN_WL_CNT_VER_11_T(frmscons),
+ IDX_IN_WL_CNT_VER_11_T(txnack),
+ IDX_IN_WL_CNT_VER_11_T(rxback),
+ IDX_IN_WL_CNT_VER_11_T(txback),
+ IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_11_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
+};
+
+/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t.
+ * INVALID_MCST_IDX slots have no ver-6 source and are reported as
+ * INVALID_CNT_VAL by wl_copy_macstat_upto_ver10().
+ */
+static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_6_T(txallfrm),
+ IDX_IN_WL_CNT_VER_6_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_6_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_6_T(txackfrm),
+ IDX_IN_WL_CNT_VER_6_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_6_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_6_T(txfunfl),
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_6_T(txfbw),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_6_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_6_T(txphyerror),
+ IDX_IN_WL_CNT_VER_6_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_6_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_6_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_6_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_6_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_6_T(rxstrt),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_6_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_6_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_6_T(rxackucast),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_6_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_6_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_6_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_6_T(bcntxcancl),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_6_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_6_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_6_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_6_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_6_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_6_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_6_T(rxnack),
+ IDX_IN_WL_CNT_VER_6_T(frmscons),
+ IDX_IN_WL_CNT_VER_6_T(txnack),
+ IDX_IN_WL_CNT_VER_6_T(rxback),
+ IDX_IN_WL_CNT_VER_6_T(txback),
+ IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_6_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_6_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_6_T(bphy_badplcp)
+};
+
+/* Index conversion table from wl_cnt_ver_7_t to wl_cnt_v_le10_mcst_t.
+ * Ver-7 lacks more macstat counters than ver-6/11, hence the extra
+ * INVALID_MCST_IDX entries; those slots become INVALID_CNT_VAL.
+ */
+static const uint8 wlcntver7t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_7_T(txallfrm),
+ IDX_IN_WL_CNT_VER_7_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_7_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_7_T(txackfrm),
+ IDX_IN_WL_CNT_VER_7_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_7_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_7_T(txfunfl),
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 5,
+ INVALID_MCST_IDX,
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_7_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_7_T(txphyerror),
+ IDX_IN_WL_CNT_VER_7_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_7_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_7_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_7_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_7_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_7_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_7_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_7_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_7_T(rxstrt),
+ IDX_IN_WL_CNT_VER_7_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_7_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_7_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_7_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_7_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_7_T(rxackucast),
+ IDX_IN_WL_CNT_VER_7_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_7_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_7_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_7_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_7_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_7_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_7_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_7_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_7_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_7_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_7_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_7_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_7_T(bcntxcancl),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_7_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_7_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_7_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_7_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_7_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_7_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_7_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_7_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_7_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_7_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_7_T(rxnack),
+ IDX_IN_WL_CNT_VER_7_T(frmscons),
+ IDX_IN_WL_CNT_VER_7_T(txnack),
+ INVALID_MCST_IDX,
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_7_T(bphy_rxcrsglitch),
+ INVALID_MCST_IDX,
+ INVALID_MCST_IDX,
+ INVALID_MCST_IDX
+};
+
+/* Translate the wlc-layer counters of a legacy (pre-xtlv) counter buffer
+ * into the wl_cnt_wlc_t layout.
+ * cntver:      source buffer version (WL_CNT_VERSION_6, _7, or anything
+ *              else, which is treated as the ver-11 layout).
+ * dst:         destination, viewed as a uint32 array overlaying
+ *              wl_cnt_wlc_t; prefilled entirely with INVALID_CNT_VAL so
+ *              that counters with no source are recognizable (and skipped
+ *              when printing).
+ * src:         source counter payload of the legacy buffer.
+ * src_max_idx: number of uint32 counters actually present in src.
+ * Returns BCME_OK, or BCME_ERROR when either pointer is NULL.
+ */
+static int
+wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx)
+{
+	uint slot;
+
+	if ((dst == NULL) || (src == NULL)) {
+		return BCME_ERROR;
+	}
+
+	/* Mark every destination counter invalid; only mapped ones get data. */
+	for (slot = 0; slot < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); slot++) {
+		dst[slot] = INVALID_CNT_VAL;
+	}
+
+	if (cntver == WL_CNT_VERSION_6) {
+		for (slot = 0; slot < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; slot++) {
+			uint8 src_idx = wlcntver6t_to_wlcntwlct[slot];
+			if (src_idx >= src_max_idx) {
+				/* src buffer ends here; nothing further to copy */
+				break;
+			}
+			dst[slot] = src[src_idx];
+		}
+	} else if (cntver == WL_CNT_VERSION_7) {
+		for (slot = 0; slot < NUM_OF_WLCCNT_IN_WL_CNT_VER_7_T; slot++) {
+			uint8 src_idx = wlcntver7t_to_wlcntwlct[slot];
+			/* skip unmapped slots and anything beyond the src buffer */
+			if ((src_idx == INVALID_IDX) || (src_idx >= src_max_idx)) {
+				continue;
+			}
+			dst[slot] = src[src_idx];
+		}
+	} else {
+		for (slot = 0; slot < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; slot++) {
+			uint8 src_idx = wlcntver11t_to_wlcntwlct[slot];
+			if (src_idx == INVALID_IDX) {
+				/* no equivalent counter in the ver-11 layout */
+				continue;
+			}
+			if (src_idx >= src_max_idx) {
+				/* src buffer does not have counters from here */
+				break;
+			}
+			dst[slot] = src[src_idx];
+		}
+	}
+	return BCME_OK;
+}
+
+/* Translate macstat (ucode) counters of a pre-ver-11 counter buffer into
+ * the wl_cnt_v_le10_mcst_t layout.
+ * cntver: source buffer version; selects the per-version index table
+ *         (WL_CNT_VERSION_6, WL_CNT_VERSION_7, otherwise ver-11).
+ * dst:    destination array of WL_CNT_MCST_VAR_NUM uint32 counters.
+ * src:    source counter payload of the legacy buffer.
+ * Counters absent from the source version are set to INVALID_CNT_VAL.
+ * Returns BCME_OK, or BCME_ERROR when either pointer is NULL.
+ */
+static int
+wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src)
+{
+	const uint8 *map;
+	uint i;
+
+	if ((dst == NULL) || (src == NULL)) {
+		return BCME_ERROR;
+	}
+
+	/* Pick the index conversion table matching the source version. */
+	if (cntver == WL_CNT_VERSION_6) {
+		map = wlcntver6t_to_wlcntvle10mcstt;
+	} else if (cntver == WL_CNT_VERSION_7) {
+		map = wlcntver7t_to_wlcntvle10mcstt;
+	} else {
+		map = wlcntver11t_to_wlcntvle10mcstt;
+	}
+
+	for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+		/* Unmapped slots have no source counter in this version. */
+		dst[i] = (map[i] == INVALID_MCST_IDX) ?
+			INVALID_CNT_VAL : src[map[i]];
+	}
+	return BCME_OK;
+}
+
+/* Copy the macstat counters of a ver-11 counter buffer into the
+ * wl_cnt_ge40mcst_v1_t / wl_cnt_lt40mcst_v1_t layout (both share the same
+ * field order, so one index table serves both).
+ * Returns BCME_OK, or BCME_ERROR when either pointer is NULL.
+ */
+static int
+wl_copy_macstat_ver11(uint32 *dst, uint32 *src)
+{
+	const uint8 *map = wlcntver11t_to_wlcntXX40mcstv1t;
+	uint i;
+
+	if ((dst == NULL) || (src == NULL)) {
+		return BCME_ERROR;
+	}
+
+	for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+		*dst++ = src[map[i]];
+	}
+	return BCME_OK;
+}
+
+/**
+ * Translate non-xtlv 'wl counters' IOVar buffer received by old driver/FW to xtlv format.
+ * Parameters:
+ * ctx: in BCMDRIVER builds, the osl_t handle used for MALLOC/MFREE;
+ * unused otherwise.
+ * cntbuf: pointer to non-xtlv 'wl counters' IOVar buffer received by old driver/FW.
+ * Newly translated xtlv format is written to this pointer (in place:
+ * the buffer is zeroed and repacked).
+ * buflen: length of the "cntbuf" without any padding.
+ * corerev: chip core revision of the driver/FW (selects the ge40 vs lt40
+ * macstat xtlv id for ver-11 sources).
+ * Returns BCME_OK on success (or when the buffer is already xtlv),
+ * BCME_NOMEM on allocation failure, BCME_ERROR if the uint8 index
+ * tables cannot address the source layout.
+ */
+int
+wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev)
+{
+ wl_cnt_wlc_t *wlccnt = NULL;
+ uint32 *macstat = NULL;
+ xtlv_desc_t xtlv_desc[3];
+ uint16 mcst_xtlv_id;
+ int res = BCME_OK;
+ wl_cnt_info_t *cntinfo = cntbuf;
+ uint8 *xtlvbuf_p = cntinfo->data;
+ uint16 ver = cntinfo->version;
+ uint16 xtlvbuflen = (uint16)buflen;
+ uint16 src_max_idx;
+#ifdef BCMDRIVER
+ osl_t *osh = ctx;
+#else
+ BCM_REFERENCE(ctx);
+#endif
+
+ if (ver >= WL_CNT_VERSION_XTLV) {
+ /* Already in xtlv format. */
+ goto exit;
+ }
+
+ /* Scratch buffers for the translated wlc and macstat counter sets. */
+#ifdef BCMDRIVER
+ wlccnt = MALLOC(osh, sizeof(*wlccnt));
+ macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ);
+#else
+ wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt));
+ macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ);
+#endif
+ if (!wlccnt || !macstat) {
+ printf("wl_cntbuf_to_xtlv_format: malloc fail!\n");
+ res = BCME_NOMEM;
+ goto exit;
+ }
+
+ /* Check if the max idx in the struct exceeds the boundary of uint8 */
+ if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) ||
+ NUM_OF_CNT_IN_WL_CNT_VER_7_T > ((uint8)(-1) + 1) ||
+ NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) {
+ printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+ " to be of uint16 instead of uint8\n");
+ res = BCME_ERROR;
+ goto exit;
+ }
+
+ /* Exclude version and length fields in either wlc_cnt_ver_6_t or wlc_cnt_ver_11_t */
+ src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32);
+ if (src_max_idx > (uint8)(-1)) {
+ printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+ " to be of uint16 instead of uint8\n"
+ "Try updating wl utility to the latest.\n");
+ /* clamp: anything past 255 counters cannot be addressed by the
+ * uint8 index tables, so only the first 255 are translated
+ */
+ src_max_idx = (uint8)(-1);
+ }
+
+ /* Copy wlc layer counters to wl_cnt_wlc_t */
+ res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 *)cntinfo->data, (uint8)src_max_idx);
+ if (res != BCME_OK) {
+ printf("wl_copy_wlccnt fail!\n");
+ goto exit;
+ }
+
+ /* Copy macstat counters to wl_cnt_wlc_t */
+ if (ver == WL_CNT_VERSION_11) {
+ res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data);
+ if (res != BCME_OK) {
+ printf("wl_copy_macstat_ver11 fail!\n");
+ goto exit;
+ }
+ /* corerev decides which ucode macstat layout the xtlv advertises */
+ if (corerev >= 40) {
+ mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1;
+ } else {
+ mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1;
+ }
+ } else {
+ res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data);
+ if (res != BCME_OK) {
+ printf("wl_copy_macstat_upto_ver10 fail!\n");
+ goto exit;
+ }
+ mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE;
+ }
+
+ /* Describe the two xtlv containers (wlc + macstat); entry 2 terminates. */
+ xtlv_desc[0].type = WL_CNT_XTLV_WLC;
+ xtlv_desc[0].len = sizeof(*wlccnt);
+ xtlv_desc[0].ptr = wlccnt;
+
+ xtlv_desc[1].type = mcst_xtlv_id;
+ xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ;
+ xtlv_desc[1].ptr = macstat;
+
+ xtlv_desc[2].type = 0;
+ xtlv_desc[2].len = 0;
+ xtlv_desc[2].ptr = NULL;
+
+ /* Wipe the legacy contents, then repack translated data as xtlvs. */
+ memset(cntbuf, 0, buflen);
+
+ res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen,
+ xtlv_desc, BCM_XTLV_OPTION_ALIGN32);
+ /* xtlvbuflen now holds the remaining space; datalen = bytes consumed */
+ cntinfo->datalen = (buflen - xtlvbuflen);
+exit:
+#ifdef BCMDRIVER
+ if (wlccnt) {
+ MFREE(osh, wlccnt, sizeof(*wlccnt));
+ }
+ if (macstat) {
+ MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ);
+ }
+#else
+ if (wlccnt) {
+ free(wlccnt);
+ }
+ if (macstat) {
+ free(macstat);
+ }
+#endif
+ return res;
+}
diff --git a/bcmdhd.101.10.361.x/bcm_l2_filter.c b/bcmdhd.101.10.361.x/bcm_l2_filter.c
new file mode 100755
index 0000000..5a5ca2c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcm_l2_filter.c
@@ -0,0 +1,766 @@
+/*
+ * L2 Filter handling functions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ */
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <ethernet.h>
+#include <bcmip.h>
+#include <bcmipv6.h>
+#include <bcmudp.h>
+#include <bcmarp.h>
+#include <bcmicmp.h>
+#include <bcmproto.h>
+#include <bcmdhcp.h>
+#include <802.11.h>
+#include <bcm_l2_filter.h>
+
+#ifdef BCMDBG_ERR
+#define L2_FILTER_ERROR(args) printf args
+#else
+#define L2_FILTER_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef BCMDBG_MSG
+#define L2_FILTER_MSG(args) printf args
+#else
+#define L2_FILTER_MSG(args)
+#endif /* BCMDBG_msg */
+
+struct arp_table {
+ parp_entry_t *parp_table[BCM_PARP_TABLE_SIZE]; /* proxyarp entries in cache table */
+ parp_entry_t *parp_candidate_list; /* proxyarp entries in candidate list */
+ uint8 parp_smac[ETHER_ADDR_LEN]; /* L2 SMAC from DHCP Req */
+ uint8 parp_cmac[ETHER_ADDR_LEN]; /* Bootp Client MAC from DHCP Req */
+};
+#ifdef DHD_DUMP_ARPTABLE
+void bcm_l2_parp_dump_table(arp_table_t* arp_tbl);
+
+/* Debug-only helper: print every cached entry (per hash bucket) and every
+ * candidate entry of the proxy-ARP table as "IP MAC" pairs.
+ */
+void
+bcm_l2_parp_dump_table(arp_table_t* arp_tbl)
+{
+	parp_entry_t *entry;
+	uint16 idx, ip_len;
+	arp_table_t *ptable;
+	ip_len = IPV4_ADDR_LEN;
+	ptable = arp_tbl;
+	/* walk all hash buckets of the cached table */
+	for (idx = 0; idx < BCM_PARP_TABLE_SIZE; idx++) {
+		entry = ptable->parp_table[idx];
+		while (entry) {
+			printf("Cached entries..\n");
+			printf("%d: %d.%d.%d.%d", idx, entry->ip.data[0], entry->ip.data[1],
+				entry->ip.data[2], entry->ip.data[3]);
+			printf("%02x:%02x:%02x:%02x:%02x:%02x", entry->ea.octet[0],
+				entry->ea.octet[1], entry->ea.octet[2], entry->ea.octet[3],
+				entry->ea.octet[4], entry->ea.octet[5]);
+			printf("\n");
+			entry = entry->next;
+		}
+	}
+	/* then the flat candidate list */
+	entry = ptable->parp_candidate_list;
+	while (entry) {
+		printf("Candidate entries..\n");
+		printf("%d.%d.%d.%d", entry->ip.data[0], entry->ip.data[1],
+			entry->ip.data[2], entry->ip.data[3]);
+		printf("%02x:%02x:%02x:%02x:%02x:%02x", entry->ea.octet[0],
+			entry->ea.octet[1], entry->ea.octet[2], entry->ea.octet[3],
+			entry->ea.octet[4], entry->ea.octet[5]);
+
+		printf("\n");
+		entry = entry->next;
+	}
+}
+#endif /* DHD_DUMP_ARPTABLE */
+
+/* Allocate and zero a proxy-ARP table; returns NULL on allocation failure. */
+arp_table_t* init_l2_filter_arp_table(osl_t* osh)
+{
+	return ((arp_table_t*)MALLOCZ(osh, sizeof(arp_table_t)));
+}
+
+/* Free the proxy-ARP table container itself.
+ * NOTE(review): entries are not walked here; callers appear responsible for
+ * flushing entries first (e.g. via bcm_l2_filter_arp_table_update with all=TRUE)
+ * -- confirm against callers.
+ */
+void deinit_l2_filter_arp_table(osl_t* osh, arp_table_t* ptable)
+{
+	MFREE(osh, ptable, sizeof(arp_table_t));
+}
+/* returns 0 if gratuitous ARP or unsolicited neighbour advertisement */
+/* Identify frames a proxy-ARP AP should suppress: returns BCME_OK when the
+ * packet is a gratuitous ARP (sender IP == target IP) or an unsolicited IPv6
+ * neighbour advertisement; BCME_ERROR otherwise. The destination must be the
+ * Ethernet broadcast address or the IPv6 all-nodes multicast MAC.
+ */
+int
+bcm_l2_filter_gratuitous_arp(osl_t *osh, void *pktbuf)
+{
+	uint8 *frame = PKTDATA(osh, pktbuf);
+	uint16 ethertype;
+	int send_ip_offset, target_ip_offset;
+	int iplen;
+	int minlen;
+	uint8 *data;
+	int datalen;
+	bool snap;
+
+	if (get_pkt_ether_type(osh, pktbuf, &data, &datalen, &ethertype, &snap) != BCME_OK)
+		return BCME_ERROR;
+
+	/* only broadcast / IPv6 all-nodes multicast destinations qualify */
+	if (!ETHER_ISBCAST(frame + ETHER_DEST_OFFSET) &&
+	    bcmp(&ether_ipv6_mcast, frame + ETHER_DEST_OFFSET, sizeof(ether_ipv6_mcast))) {
+		return BCME_ERROR;
+	}
+
+	if (ethertype == ETHER_TYPE_ARP) {
+		L2_FILTER_MSG(("bcm_l2_filter_gratuitous_arp: ARP RX data : %p: datalen : %d\n",
+			data, datalen));
+		send_ip_offset = ARP_SRC_IP_OFFSET;
+		target_ip_offset = ARP_TGT_IP_OFFSET;
+		iplen = IPV4_ADDR_LEN;
+		minlen = ARP_DATA_LEN;
+	} else if (ethertype == ETHER_TYPE_IPV6) {
+		send_ip_offset = NEIGHBOR_ADVERTISE_SRC_IPV6_OFFSET;
+		target_ip_offset = NEIGHBOR_ADVERTISE_TGT_IPV6_OFFSET;
+		iplen = IPV6_ADDR_LEN;
+		minlen = target_ip_offset + iplen;
+
+		/* check for neighbour advertisement */
+		if (datalen >= minlen && (data[IPV6_NEXT_HDR_OFFSET] != IP_PROT_ICMP6 ||
+			data[NEIGHBOR_ADVERTISE_TYPE_OFFSET] != NEIGHBOR_ADVERTISE_TYPE))
+			return BCME_ERROR;
+
+		/* Dont drop Unsolicitated NA fm AP with allnode mcast dest addr (HS2-4.5.E) */
+		if (datalen >= minlen &&
+		    (data[IPV6_NEXT_HDR_OFFSET] == IP_PROT_ICMP6) &&
+		    (data[NEIGHBOR_ADVERTISE_TYPE_OFFSET] == NEIGHBOR_ADVERTISE_TYPE) &&
+		    (data[NEIGHBOR_ADVERTISE_OPTION_OFFSET] == OPT_TYPE_TGT_LINK_ADDR)) {
+			L2_FILTER_MSG(("Unsolicitated Neighbour Advertisement from AP "
+				"with allnode mcast dest addr tx'ed (%d)\n", datalen));
+			return -1;
+		}
+
+	} else {
+		return BCME_ERROR;
+	}
+
+	/* truncated frames are never classified as gratuitous */
+	if (datalen < minlen) {
+		L2_FILTER_MSG(("BCM: dhd_gratuitous_arp: truncated packet (%d)\n", datalen));
+		return BCME_ERROR;
+	}
+
+	/* gratuitous: sender and target protocol addresses are identical */
+	if (bcmp(data + send_ip_offset, data + target_ip_offset, iplen) == 0) {
+		L2_FILTER_MSG((" returning BCME_OK in bcm_l2_filter_gratuitous_arp\n"));
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+/* Locate the real ethertype of a frame, accepting Ethernet II and
+ * LLC/SNAP-encapsulated 802.3, and skipping one 802.1Q VLAN tag.
+ * On BCME_OK: *data_ptr/*len_ptr describe the payload after the type field,
+ * *et_ptr is the host-order ethertype and *snap_ptr tells whether SNAP
+ * encapsulation was present.
+ */
+int
+get_pkt_ether_type(osl_t *osh, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
+{
+	uint8 *frame = PKTDATA(osh, pktbuf);
+	int length = PKTLEN(osh, pktbuf);
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	bool snap = FALSE;
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		L2_FILTER_MSG(("BCM: get_pkt_ether_type: short eth frame (%d)\n",
+		               length));
+		return BCME_ERROR;
+	} else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+	           !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+		snap = TRUE;
+	} else {
+		L2_FILTER_MSG((" get_pkt_ether_type: non-SNAP 802.3 frame\n"));
+		return BCME_ERROR;
+	}
+
+	ethertype = ntoh16_ua(pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		/* make sure the inner type field is still inside the frame */
+		if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
+			L2_FILTER_MSG(("BCM: get_pkt_ether_type: short VLAN frame (%d)\n",
+			              length));
+			return BCME_ERROR;
+		}
+		ethertype = ntoh16_ua(pt);
+	}
+	*data_ptr = pt + ETHER_TYPE_LEN;
+	*len_ptr = length - (int32)(pt + ETHER_TYPE_LEN - frame);
+	*et_ptr = ethertype;
+	*snap_ptr = snap;
+	return BCME_OK;
+}
+
+/* Extract the IPv4 payload of a frame. On BCME_OK: *data_ptr/*len_ptr
+ * describe the transport payload past the IP header and *prot_ptr is the
+ * IP protocol number. IPv6, fragments and malformed headers are rejected
+ * with BCME_ERROR.
+ */
+int
+get_pkt_ip_type(osl_t *osh, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
+{
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int iplen;			/* IP frame length */
+	uint16 ethertype, iphdrlen, ippktlen;
+	uint16 iph_frag;
+	uint8 prot;
+	bool snap;
+
+	if (get_pkt_ether_type(osh, pktbuf, (uint8 **)&iph,
+	    &iplen, &ethertype, &snap) != 0)
+		return BCME_ERROR;
+
+	if (ethertype != ETHER_TYPE_IP) {
+		return BCME_ERROR;
+	}
+
+	/* We support IPv4 only */
+	if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
+		return BCME_ERROR;
+	}
+
+	/* Header length sanity */
+	iphdrlen = IPV4_HLEN(iph);
+
+	/*
+	 * Packet length sanity; sometimes we receive eth-frame size bigger
+	 * than the IP content, which results in a bad tcp chksum
+	 */
+	ippktlen = ntoh16(iph->tot_len);
+	if (ippktlen < iplen) {
+		/* trailing ethernet padding: trust the IP total length */
+		L2_FILTER_MSG(("get_pkt_ip_type: extra frame length ignored\n"));
+		iplen = ippktlen;
+	} else if (ippktlen > iplen) {
+		L2_FILTER_MSG(("get_pkt_ip_type: truncated IP packet (%d)\n",
+		               ippktlen - iplen));
+		return BCME_ERROR;
+	}
+
+	if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
+		L2_FILTER_ERROR((" get_pkt_ip_type: IP-header-len (%d) out of range (%d-%d)\n",
+		           iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
+		return BCME_ERROR;
+	}
+
+	/*
+	 * We don't handle fragmented IP packets.  A first frag is indicated by the MF
+	 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
+	 */
+	iph_frag = ntoh16(iph->frag);
+
+	if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
+		L2_FILTER_ERROR(("get_pkt_ip_type: IP fragment not handled\n"));
+		return BCME_ERROR;
+	}
+	prot = IPV4_PROT(iph);
+	*data_ptr = (((uint8 *)iph) + iphdrlen);
+	*len_ptr = iplen - iphdrlen;
+	*prot_ptr = prot;
+	return BCME_OK;
+}
+
+/* Check if packet type is ICMP ECHO */
+/* Returns BCME_OK when the packet is an ICMP echo request (i.e. should be
+ * blocked by the ping filter), BCME_ERROR otherwise.
+ */
+int bcm_l2_filter_block_ping(osl_t *osh, void *pktbuf)
+{
+	struct bcmicmp_hdr *icmph;
+	int udpl;
+	uint8 prot;
+
+	if (get_pkt_ip_type(osh, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
+		return BCME_ERROR;
+	if (prot == IP_PROT_ICMP) {
+		if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
+			return BCME_OK;
+	}
+	return BCME_ERROR;
+}
+
+/* For a multicast DHCP reply (offer/ack) sent to the client port, point
+ * *mac_addr at the chaddr (client hardware address) field inside the packet.
+ * Returns BCME_OK on success; the pointer refers into pktbuf, no copy is made.
+ * 'ifidx' is currently unused by this routine.
+ */
+int bcm_l2_filter_get_mac_addr_dhcp_pkt(osl_t *osh, void *pktbuf,
+	int ifidx, uint8** mac_addr)
+{
+	uint8 *eh = PKTDATA(osh, pktbuf);
+	uint8 *udph;
+	uint8 *dhcp;
+	int udpl;
+	int dhcpl;
+	uint16 port;
+	uint8 prot;
+
+	if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
+		return BCME_ERROR;
+	if (get_pkt_ip_type(osh, pktbuf, &udph, &udpl, &prot) != 0)
+		return BCME_ERROR;
+	if (prot != IP_PROT_UDP)
+		return BCME_ERROR;
+	/* check frame length, at least UDP_HDR_LEN */
+	if (udpl < UDP_HDR_LEN) {
+		L2_FILTER_MSG(("BCM: bcm_l2_filter_get_mac_addr_dhcp_pkt: short UDP frame,"
+			" ignored\n"));
+		return BCME_ERROR;
+	}
+	port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
+	/* only process DHCP packets from server to client */
+	if (port != DHCP_PORT_CLIENT)
+		return BCME_ERROR;
+
+	dhcp = udph + UDP_HDR_LEN;
+	dhcpl = udpl - UDP_HDR_LEN;
+
+	/* must contain at least the fixed header up to and including chaddr */
+	if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
+		L2_FILTER_MSG(("BCM: bcm_l2_filter_get_mac_addr_dhcp_pkt: short DHCP frame,"
+			" ignored\n"));
+		return BCME_ERROR;
+	}
+	/* only process DHCP reply(offer/ack) packets */
+	if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
+		return BCME_ERROR;
+	/* chaddr = dhcp + DHCP_CHADDR_OFFSET; */
+	*mac_addr = dhcp + DHCP_CHADDR_OFFSET;
+	return BCME_OK;
+}
+/* modify the mac address for IP, in arp table */
+/* Overwrite the MAC of an existing entry matching 'ip' (in the cached table
+ * when 'cached', else in the candidate list) and refresh its timestamp.
+ * Returns BCME_OK when a matching entry was updated, BCME_ERROR otherwise
+ * (including null/broadcast IPv4 or null IPv6 addresses, which are rejected).
+ */
+int
+bcm_l2_filter_parp_modifyentry(arp_table_t* arp_tbl, struct ether_addr *ea,
+	uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt)
+{
+	parp_entry_t *entry;
+	uint8 idx, ip_len;
+	arp_table_t *ptable;
+
+	/* bucket index is derived from the last address byte */
+	if (ip_ver == IP_VER_4 && !IPV4_ADDR_NULL(ip) && !IPV4_ADDR_BCAST(ip)) {
+		idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+		ip_len = IPV4_ADDR_LEN;
+	}
+	else if (ip_ver == IP_VER_6 && !IPV6_ADDR_NULL(ip)) {
+		idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+		ip_len = IPV6_ADDR_LEN;
+	}
+	else {
+	    return BCME_ERROR;
+	}
+
+	ptable = arp_tbl;
+	if (cached) {
+	    entry = ptable->parp_table[idx];
+	} else {
+	    entry = ptable->parp_candidate_list;
+	}
+	while (entry) {
+		if (bcmp(entry->ip.data, ip, ip_len) == 0) {
+			/* entry matches, overwrite mac content and return */
+			bcopy((void *)ea, (void *)&entry->ea, ETHER_ADDR_LEN);
+			entry->used = entry_tickcnt;
+#ifdef DHD_DUMP_ARPTABLE
+			bcm_l2_parp_dump_table(arp_tbl);
+#endif
+			return BCME_OK;
+		}
+		entry = entry->next;
+	}
+#ifdef DHD_DUMP_ARPTABLE
+	bcm_l2_parp_dump_table(arp_tbl);
+#endif
+	return BCME_ERROR;
+}
+
+/* Add the IP entry in ARP table based on Cached argument, if cached argument is
+ * non zero positive value: it adds to parp_table, else adds to
+ * parp_candidate_list
+ */
+/* The new node (header + ip_len bytes of address) is inserted at the head of
+ * its list. Returns BCME_OK, BCME_NOMEM on allocation failure, or BCME_ERROR
+ * for rejected addresses (null/broadcast IPv4, null IPv6, unknown version).
+ */
+int
+bcm_l2_filter_parp_addentry(osl_t *osh, arp_table_t* arp_tbl, struct ether_addr *ea,
+	uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt)
+{
+	parp_entry_t *entry;
+	uint8 idx, ip_len;
+	arp_table_t *ptable;
+
+	if (ip_ver == IP_VER_4 && !IPV4_ADDR_NULL(ip) && !IPV4_ADDR_BCAST(ip)) {
+		idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+		ip_len = IPV4_ADDR_LEN;
+	}
+	else if (ip_ver == IP_VER_6 && !IPV6_ADDR_NULL(ip)) {
+		idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+		ip_len = IPV6_ADDR_LEN;
+	}
+	else {
+	    return BCME_ERROR;
+	}
+
+	/* variable-length allocation: the IP bytes live in entry->ip.data[] */
+	if ((entry = MALLOCZ(osh, sizeof(parp_entry_t) + ip_len)) == NULL) {
+	    L2_FILTER_MSG(("Allocating new parp_entry for IPv%d failed!!\n", ip_ver));
+	    return BCME_NOMEM;
+	}
+
+	bcopy((void *)ea, (void *)&entry->ea, ETHER_ADDR_LEN);
+	entry->used = entry_tickcnt;
+	entry->ip.id = ip_ver;
+	entry->ip.len = ip_len;
+	bcopy(ip, entry->ip.data, ip_len);
+	ptable = arp_tbl;
+	if (cached) {
+	    entry->next = ptable->parp_table[idx];
+	    ptable->parp_table[idx] = entry;
+	} else {
+	    entry->next = ptable->parp_candidate_list;
+	    ptable->parp_candidate_list = entry;
+	}
+#ifdef DHD_DUMP_ARPTABLE
+	bcm_l2_parp_dump_table(arp_tbl);
+#endif
+	return BCME_OK;
+}
+
+/* Delete the IP entry in ARP table based on Cached argument, if cached argument is
+ * non zero positive value: it delete from parp_table, else delete from
+ * parp_candidate_list
+ */
+/* An entry is removed only when ip version, ip bytes AND MAC all match.
+ * Always returns BCME_OK, even when nothing matched. The freed size is
+ * sizeof(parp_entry_t) + ip_len, which matches the addentry allocation for
+ * the same ip_ver.
+ */
+int
+bcm_l2_filter_parp_delentry(osl_t* osh, arp_table_t *arp_tbl, struct ether_addr *ea,
+	uint8 *ip, uint8 ip_ver, bool cached)
+{
+	parp_entry_t *entry, *prev = NULL;
+	uint8 idx, ip_len;
+	arp_table_t *ptable;
+
+	if (ip_ver == IP_VER_4) {
+		idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+		ip_len = IPV4_ADDR_LEN;
+	}
+	else if (ip_ver == IP_VER_6) {
+		idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+		ip_len = IPV6_ADDR_LEN;
+	}
+	else {
+	    return BCME_ERROR;
+	}
+	ptable = arp_tbl;
+	if (cached) {
+	    entry = ptable->parp_table[idx];
+	} else {
+	    entry = ptable->parp_candidate_list;
+	}
+	/* singly-linked list unlink with trailing 'prev' pointer */
+	while (entry) {
+		if (entry->ip.id == ip_ver &&
+		    bcmp(entry->ip.data, ip, ip_len) == 0 &&
+		    bcmp(&entry->ea, ea, ETHER_ADDR_LEN) == 0) {
+			if (prev == NULL) {
+				if (cached) {
+					ptable->parp_table[idx] = entry->next;
+				} else {
+					ptable->parp_candidate_list = entry->next;
+				}
+			} else {
+				prev->next = entry->next;
+			}
+			break;
+		}
+		prev = entry;
+		entry = entry->next;
+	}
+	if (entry != NULL)
+		MFREE(osh, entry, sizeof(parp_entry_t) + ip_len);
+#ifdef DHD_DUMP_ARPTABLE
+	bcm_l2_parp_dump_table(arp_tbl);
+#endif
+	return BCME_OK;
+}
+
+/* search the IP entry in ARP table based on Cached argument, if cached argument is
+ * non zero positive value: it searches from parp_table, else search from
+ * parp_candidate_list
+ */
+/* Returns the matching entry (also refreshing its 'used' timestamp to
+ * entry_tickcnt) or NULL when not found / ip_ver is unknown.
+ */
+parp_entry_t *
+bcm_l2_filter_parp_findentry(arp_table_t* arp_tbl, uint8 *ip, uint8 ip_ver, bool cached,
+	unsigned int entry_tickcnt)
+{
+	parp_entry_t *entry;
+	uint8 idx, ip_len;
+	arp_table_t *ptable;
+
+	if (ip_ver == IP_VER_4) {
+		idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+		ip_len = IPV4_ADDR_LEN;
+	} else if (ip_ver == IP_VER_6) {
+		idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+		ip_len = IPV6_ADDR_LEN;
+	} else {
+		return NULL;
+	}
+	ptable = arp_tbl;
+	if (cached) {
+	    entry = ptable->parp_table[idx];
+	} else {
+	    entry = ptable->parp_candidate_list;
+	}
+	while (entry) {
+		if (entry->ip.id == ip_ver && bcmp(entry->ip.data, ip, ip_len) == 0) {
+			/* time stamp of adding the station entry to arp table for ifp */
+			entry->used = entry_tickcnt;
+			break;
+		}
+		entry = entry->next;
+	}
+	return entry;
+}
+
+/* update arp table entries for every proxy arp enable interface */
+/* Ageing pass over the proxy-ARP state:
+ *  - cached table: remove entries when 'all' is set, on periodic timeout,
+ *    or when their MAC matches 'del_ea';
+ *  - candidate list: remove matching candidates, and on periodic announce-wait
+ *    expiry promote a candidate into the cached table unless an entry with the
+ *    same IP (same IP version) is already cached.
+ */
+void
+bcm_l2_filter_arp_table_update(osl_t *osh, arp_table_t* arp_tbl, bool all, uint8 *del_ea,
+	bool periodic, unsigned int tickcnt)
+{
+	parp_entry_t *prev, *entry, *delentry;
+	uint8 idx, ip_ver;
+	struct ether_addr ea;
+	uint8 ip[IPV6_ADDR_LEN];
+	arp_table_t *ptable;
+
+	ptable = arp_tbl;
+	for (idx = 0; idx < BCM_PARP_TABLE_SIZE; idx++) {
+		entry = ptable->parp_table[idx];
+		while (entry) {
+			/* check if the entry need to be removed */
+			if (all || (periodic && BCM_PARP_IS_TIMEOUT(tickcnt, entry)) ||
+			    (del_ea != NULL && !bcmp(del_ea, &entry->ea, ETHER_ADDR_LEN))) {
+				/* copy key fields out first: delentry frees the node */
+				ip_ver = entry->ip.id;
+				bcopy(entry->ip.data, ip, entry->ip.len);
+				bcopy(&entry->ea, &ea, ETHER_ADDR_LEN);
+				entry = entry->next;
+				bcm_l2_filter_parp_delentry(osh, ptable, &ea, ip, ip_ver, TRUE);
+			}
+			else {
+				entry = entry->next;
+			}
+		}
+	}
+
+	/* remove candidate or promote to real entry */
+	prev = delentry = NULL;
+	entry = ptable->parp_candidate_list;
+	while (entry) {
+		/* remove candidate */
+		if (all || (periodic && BCM_PARP_ANNOUNCE_WAIT_REACH(tickcnt, entry)) ||
+		    (del_ea != NULL && !bcmp(del_ea, (uint8 *)&entry->ea, ETHER_ADDR_LEN))) {
+			bool promote = (periodic && BCM_PARP_ANNOUNCE_WAIT_REACH(tickcnt, entry)) ?
+				TRUE: FALSE;
+			parp_entry_t *node = NULL;
+
+			ip_ver = entry->ip.id;
+
+			if (prev == NULL)
+				ptable->parp_candidate_list = entry->next;
+			else
+				prev->next = entry->next;
+
+			/* FIX: look up the cached table with the candidate's own IP
+			 * version instead of a hardcoded IP_VER_6, so IPv4 candidates
+			 * are deduplicated against IPv4 cached entries too.
+			 */
+			node = bcm_l2_filter_parp_findentry(ptable,
+				entry->ip.data, entry->ip.id, TRUE, tickcnt);
+			if (promote && node == NULL) {
+				bcm_l2_filter_parp_addentry(osh, ptable, &entry->ea,
+					entry->ip.data, entry->ip.id, TRUE, tickcnt);
+			}
+			MFREE(osh, entry, sizeof(parp_entry_t) + entry->ip.len);
+			if (prev == NULL) {
+				entry = ptable->parp_candidate_list;
+			} else {
+				entry = prev->next;
+			}
+		}
+		else {
+			prev = entry;
+			entry = entry->next;
+		}
+	}
+}
+/* create 42 byte ARP packet for ARP response, aligned the Buffer */
+/* Allocate a reply packet, fill the 14-byte Ethernet header (plus LLC/SNAP if
+ * 'snap'), and return the packet with *p pointing at the payload area the
+ * caller fills in. Returns NULL on allocation failure.
+ * NOTE(review): in the snap case the 802.3 length field is written from
+ * 'pktlen' after it was inflated by ALIGN_ADJ_BUFLEN -- confirm receivers
+ * tolerate this.
+ */
+void *
+bcm_l2_filter_proxyarp_alloc_reply(osl_t* osh, uint16 pktlen, struct ether_addr *src_ea,
+	struct ether_addr *dst_ea, uint16 ea_type, bool snap, void **p)
+{
+	void *pkt;
+	uint8 *frame;
+
+	/* adjust pktlen since skb->data is aligned to 2 */
+	pktlen += ALIGN_ADJ_BUFLEN;
+
+	if ((pkt = PKTGET(osh, pktlen, FALSE)) == NULL) {
+		L2_FILTER_ERROR(("bcm_l2_filter_proxyarp_alloc_reply: PKTGET failed\n"));
+		return NULL;
+	}
+	/* adjust for pkt->data aligned */
+	PKTPULL(osh, pkt, ALIGN_ADJ_BUFLEN);
+	frame = PKTDATA(osh, pkt);
+
+	/* Create 14-byte eth header, plus snap header if applicable */
+	bcopy(src_ea, frame + ETHER_SRC_OFFSET, ETHER_ADDR_LEN);
+	bcopy(dst_ea, frame + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+	if (snap) {
+		hton16_ua_store(pktlen, frame + ETHER_TYPE_OFFSET);
+		bcopy(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN);
+		hton16_ua_store(ea_type, frame + ETHER_HDR_LEN + SNAP_HDR_LEN);
+	} else
+		hton16_ua_store(ea_type, frame + ETHER_TYPE_OFFSET);
+
+	*p = (void *)(frame + ETHER_HDR_LEN + (snap ? SNAP_HDR_LEN + ETHER_TYPE_LEN : 0));
+	return pkt;
+}
+/* Copy the recorded L2 source MAC (6 bytes) out of the table into 'smac'. */
+void bcm_l2_filter_parp_get_smac(arp_table_t* ptable, void* smac)
+{
+	bcopy(ptable->parp_smac, smac, ETHER_ADDR_LEN);
+}
+/* Copy the recorded bootp client MAC (6 bytes) out of the table into 'cmac'. */
+void bcm_l2_filter_parp_get_cmac(arp_table_t* ptable, void* cmac)
+{
+	bcopy(ptable->parp_cmac, cmac, ETHER_ADDR_LEN);
+}
+/* Record the L2 source MAC (6 bytes) from 'smac' into the table. */
+void bcm_l2_filter_parp_set_smac(arp_table_t* ptable, void* smac)
+{
+	bcopy(smac, ptable->parp_smac, ETHER_ADDR_LEN);
+}
+/* Record the bootp client MAC (6 bytes) from 'cmac' into the table. */
+void bcm_l2_filter_parp_set_cmac(arp_table_t* ptable, void* cmac)
+{
+	bcopy(cmac, ptable->parp_cmac, ETHER_ADDR_LEN);
+}
+
+/* RFC 1071 style 16-bit one's-complement checksum over 'ul_data'
+ * (ul_len bytes). When src_ipa is non-NULL, an IPv6 pseudo-header
+ * (src addr, dst addr, upper-layer length, next header) is folded in first.
+ * NOTE(review): sums the buffers via uint16* loads, so inputs are assumed
+ * at least 2-byte aligned -- confirm for all callers.
+ */
+uint16
+calc_checksum(uint8 *src_ipa, uint8 *dst_ipa, uint32 ul_len, uint8 prot, uint8 *ul_data)
+{
+	uint16 *startpos;
+	uint32 sum = 0;
+	int i;
+	uint16 answer = 0;
+
+	if (src_ipa) {
+		/* IPv6 pseudo-header: addresses, 32-bit length, zero, next header */
+		uint8 ph[8] = {0, };
+		for (i = 0; i < (IPV6_ADDR_LEN / 2); i++) {
+			sum += *((uint16 *)src_ipa);
+			src_ipa += 2;
+		}
+
+		for (i = 0; i < (IPV6_ADDR_LEN / 2); i++) {
+			sum += *((uint16 *)dst_ipa);
+			dst_ipa += 2;
+		}
+
+		*((uint32 *)ph) = hton32(ul_len);
+		*((uint32 *)(ph+4)) = 0;
+		ph[7] = prot;
+		startpos = (uint16 *)ph;
+		for (i = 0; i < 4; i++) {
+			sum += *startpos++;
+		}
+	}
+
+	startpos = (uint16 *)ul_data;
+	while (ul_len > 1) {
+		sum += *startpos++;
+		ul_len -= 2;
+	}
+
+	/* odd trailing byte is padded with zero */
+	if (ul_len == 1) {
+		*((uint8 *)(&answer)) = *((uint8 *)startpos);
+		sum += answer;
+	}
+
+	/* fold carries and take the one's complement */
+	sum = (sum >> 16) + (sum & 0xffff);
+	sum += (sum >> 16);
+	answer = ~sum;
+
+	return answer;
+}
+/*
+ * The length of the option including
+ * the type and length fields in units of 8 octets
+ */
+/* Find the ND (neighbour discovery) option with id 'key' in 'buf' of
+ * 'buflen' bytes; per RFC 4861 each option's length field counts units of
+ * 8 octets. Returns the option or NULL when absent/malformed.
+ */
+bcm_tlv_t *
+parse_nd_options(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		int len = elt->len * 8;
+
+		/* FIX: a zero-length option is malformed (RFC 4861 4.6) and would
+		 * never advance 'elt' nor shrink 'totlen', spinning forever here.
+		 */
+		if (len == 0) {
+			break;
+		}
+
+		/* validate remaining totlen */
+		if ((elt->id == key) &&
+		    (totlen >= len))
+			return (elt);
+
+		elt = (bcm_tlv_t*)((uint8*)elt + len);
+		totlen -= len;
+	}
+
+	return NULL;
+}
+
+/* returns 0 if tdls set up request or tdls discovery request */
+/* Parse an 802.11 TDLS (ethertype 0x890d) frame and return BCME_OK when it
+ * is a TDLS setup request or discovery request (i.e. should be blocked);
+ * BCME_ERROR for anything else or for malformed frames.
+ */
+int
+bcm_l2_filter_block_tdls(osl_t *osh, void *pktbuf)
+{
+	uint16 ethertype;
+	uint8 *data;
+	int datalen;
+	bool snap;
+	uint8 action_field;
+
+	if (get_pkt_ether_type(osh, pktbuf, &data, &datalen, &ethertype, &snap) != BCME_OK)
+		return BCME_ERROR;
+
+	if (ethertype != ETHER_TYPE_89_0D)
+		return BCME_ERROR;
+
+	/* validate payload type */
+	if (datalen < TDLS_PAYLOAD_TYPE_LEN + 2) {
+		L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong length for 89-0d eth frame %d\n",
+			datalen));
+		return BCME_ERROR;
+	}
+
+	/* validate payload type */
+	if (*data != TDLS_PAYLOAD_TYPE) {
+		L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong payload type for 89-0d"
+			" eth frame %d\n",
+			*data));
+		return BCME_ERROR;
+	}
+	data += TDLS_PAYLOAD_TYPE_LEN;
+
+	/* validate TDLS action category */
+	if (*data != TDLS_ACTION_CATEGORY_CODE) {
+		L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong TDLS Category %d\n", *data));
+		return BCME_ERROR;
+	}
+	data++;
+
+	action_field = *data;
+
+	/* block only setup and discovery requests */
+	if ((action_field == TDLS_SETUP_REQ) || (action_field == TDLS_DISCOVERY_REQ))
+		return BCME_OK;
+
+	return BCME_ERROR;
+}
diff --git a/bcmdhd.101.10.361.x/bcmbloom.c b/bcmdhd.101.10.361.x/bcmbloom.c
new file mode 100755
index 0000000..7660c88
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmbloom.c
@@ -0,0 +1,233 @@
+/*
+ * Bloom filter support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+#include <stdarg.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#else /* !BCMDRIVER */
+#include <stdio.h>
+#include <string.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* !BCMDRIVER */
+#include <bcmutils.h>
+
+#include <bcmbloom.h>
+
+/* convert a byte count to a bit count */
+#define BLOOM_BIT_LEN(_x) ((_x) << 3)
+
+/* Bloom filter instance; allocation/free go through the caller's callbacks. */
+struct bcm_bloom_filter {
+	void *cb_ctx;
+	uint max_hash;
+	bcm_bloom_hash_t *hash;		/* array of hash functions */
+	uint filter_size;		/* in bytes */
+	uint8 *filter;			/* can be NULL for validate only */
+};
+
+/* public interface */
+/* Create a bloom filter with room for 'max_hash' hash callbacks and a bit
+ * array of 'filter_size' bytes (0 => validate-only filter, no bit array).
+ * All memory comes from alloc_cb/cb_ctx. On failure everything allocated so
+ * far is released via bcm_bloom_destroy. Returns BCME_OK / BCME_BADARG /
+ * BCME_NOMEM.
+ */
+int
+bcm_bloom_create(bcm_bloom_alloc_t alloc_cb,
+	bcm_bloom_free_t free_cb, void *cb_ctx, uint max_hash,
+	uint filter_size, bcm_bloom_filter_t **bloom)
+{
+	int err = BCME_OK;
+	bcm_bloom_filter_t *bp = NULL;
+
+	if (!bloom || !alloc_cb || (max_hash == 0)) {
+		err = BCME_BADARG;
+		goto done;
+	}
+
+	bp = (*alloc_cb)(cb_ctx, sizeof(*bp));
+	if (!bp) {
+		err = BCME_NOMEM;
+		goto done;
+	}
+	memset(bp, 0, sizeof(*bp));
+
+	bp->cb_ctx = cb_ctx;
+	bp->max_hash = max_hash;
+	bp->hash = (*alloc_cb)(cb_ctx, sizeof(*bp->hash) * max_hash);
+	if (!bp->hash) {
+		err = BCME_NOMEM;
+		goto done;
+	}
+	memset(bp->hash, 0, sizeof(*bp->hash) * max_hash);
+
+	if (filter_size > 0) {
+		bp->filter = (*alloc_cb)(cb_ctx, filter_size);
+		if (!bp->filter) {
+			err = BCME_NOMEM;
+			goto done;
+		}
+		bp->filter_size = filter_size;
+		memset(bp->filter, 0, filter_size);
+	}
+
+	*bloom = bp;
+
+done:
+	/* unified cleanup path: destroy tolerates partial construction */
+	if (err != BCME_OK)
+		bcm_bloom_destroy(&bp, free_cb);
+
+	return err;
+}
+
+/* Release a bloom filter created by bcm_bloom_create and NULL the caller's
+ * pointer. Safe to call with a NULL/already-destroyed filter or without a
+ * free callback (then it is a no-op). Always returns BCME_OK.
+ */
+int
+bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb)
+{
+	int err = BCME_OK;
+	bcm_bloom_filter_t *bp;
+
+	if (!bloom || !*bloom || !free_cb)
+		goto done;
+
+	bp = *bloom;
+	*bloom = NULL;
+
+	if (bp->filter)
+		(*free_cb)(bp->cb_ctx, bp->filter, bp->filter_size);
+	if (bp->hash)
+		(*free_cb)(bp->cb_ctx, bp->hash,
+			sizeof(*bp->hash) * bp->max_hash);
+	(*free_cb)(bp->cb_ctx, bp, sizeof(*bp));
+
+done:
+	return err;
+}
+
+/* Register 'hash' in the first free slot and report its index via *idx.
+ * Returns BCME_BADARG on NULL args, BCME_NORESOURCE when all slots are used.
+ */
+int
+bcm_bloom_add_hash(bcm_bloom_filter_t *bp, bcm_bloom_hash_t hash, uint *idx)
+{
+	uint i;
+
+	if (!bp || !hash || !idx)
+		return BCME_BADARG;
+
+	/* find the first unused slot */
+	for (i = 0; i < bp->max_hash; ++i) {
+		if (bp->hash[i] == NULL)
+			break;
+	}
+
+	if (i >= bp->max_hash)
+		return BCME_NORESOURCE;
+
+	bp->hash[i] = hash;
+	*idx = i;
+	return BCME_OK;
+}
+
+/* Unregister the hash callback at slot 'idx'. Returns BCME_BADARG for a NULL
+ * filter, BCME_NOTFOUND for an out-of-range index, else BCME_OK.
+ */
+int
+bcm_bloom_remove_hash(bcm_bloom_filter_t *bp, uint idx)
+{
+	if (!bp)
+		return BCME_BADARG;
+
+	if (idx >= bp->max_hash)
+		return BCME_NOTFOUND;
+
+	bp->hash[idx] = NULL;
+	return BCME_OK;
+}
+
+/* Membership test for 'tag' against 'buf' (or the internal bit array when
+ * buf is NULL/empty). An empty tag, or an empty filter, counts as a member.
+ * NOTE(review): despite the 'bool' return type this actually returns an
+ * error code -- BCME_OK (0) for "member" and BCME_NOTFOUND for "not member" --
+ * so as a boolean the truth value is inverted. Callers must compare against
+ * BCME_OK; confirm before changing the signature.
+ * NOTE(review): 'bp' is dereferenced without a NULL check, unlike the other
+ * entry points in this file.
+ */
+bool
+bcm_bloom_is_member(bcm_bloom_filter_t *bp,
+	const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len)
+{
+	uint i;
+	int err = BCME_OK;
+
+	if (!tag || (tag_len == 0)) /* empty tag is always a member */
+		goto done;
+
+	/* use internal buffer if none was specified */
+	if (!buf || (buf_len == 0)) {
+		if (!bp->filter)	/* every one is a member of empty filter */
+			goto done;
+
+		buf = bp->filter;
+		buf_len = bp->filter_size;
+	}
+
+	for (i = 0; i < bp->max_hash; ++i) {
+		uint pos;
+		if (!bp->hash[i])
+			continue;
+		pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len);
+
+		/* all bits must be set for a match */
+		if (isclr(buf, pos % BLOOM_BIT_LEN(buf_len))) {
+			err = BCME_NOTFOUND;
+			break;
+		}
+	}
+
+done:
+	return err;
+}
+
+/* Insert 'tag' by setting one bit per registered hash in the internal bit
+ * array. Returns BCME_BADARG for NULL/empty args, BCME_UNSUPPORTED for a
+ * validate-only filter (no bit array), else BCME_OK.
+ */
+int
+bcm_bloom_add_member(bcm_bloom_filter_t *bp, const uint8 *tag, uint tag_len)
+{
+	uint i;
+
+	if (!bp || !tag || (tag_len == 0))
+		return BCME_BADARG;
+
+	if (!bp->filter)		/* validate only */
+		return BCME_UNSUPPORTED;
+
+	for (i = 0; i < bp->max_hash; ++i) {
+		uint pos;
+		if (!bp->hash[i])
+			continue;
+		pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len);
+		setbit(bp->filter, pos % BLOOM_BIT_LEN(bp->filter_size));
+	}
+
+	return BCME_OK;
+}
+
+/* Copy the filter's bit array into 'buf' (buf_size bytes). The required size
+ * is always reported through *buf_len when provided, even if the copy fails
+ * with BCME_BUFTOOSHORT, so callers can retry with a bigger buffer.
+ */
+int bcm_bloom_get_filter_data(bcm_bloom_filter_t *bp,
+	uint buf_size, uint8 *buf, uint *buf_len)
+{
+	if (!bp)
+		return BCME_BADARG;
+
+	if (buf_len)
+		*buf_len = bp->filter_size;
+
+	if (buf_size < bp->filter_size)
+		return BCME_BUFTOOSHORT;
+
+	if (bp->filter && bp->filter_size)
+		memcpy(buf, bp->filter, bp->filter_size);
+
+	return BCME_OK;
+}
diff --git a/bcmdhd.101.10.361.x/bcmevent.c b/bcmdhd.101.10.361.x/bcmevent.c
new file mode 100755
index 0000000..a8cafcb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmevent.c
@@ -0,0 +1,445 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <bcmeth.h>
+#include <bcmevent.h>
+#include <802.11.h>
+
/* Table of event name strings for UIs and debugging dumps */
typedef struct {
	uint event;		/* WLC_E_* event code */
	const char *name;	/* stringified enumerator name */
} bcmevent_name_str_t;

/* Use the actual name for event tracing */
#define BCMEVENT_NAME(_event) {(_event), #_event}
+
+/* this becomes static data when all code is changed to use
+ * the bcmevent_get_name() API
+ */
+static const bcmevent_name_str_t bcmevent_names[] = {
+ BCMEVENT_NAME(WLC_E_SET_SSID),
+ BCMEVENT_NAME(WLC_E_JOIN),
+ BCMEVENT_NAME(WLC_E_START),
+ BCMEVENT_NAME(WLC_E_AUTH),
+ BCMEVENT_NAME(WLC_E_AUTH_IND),
+ BCMEVENT_NAME(WLC_E_DEAUTH),
+ BCMEVENT_NAME(WLC_E_DEAUTH_IND),
+ BCMEVENT_NAME(WLC_E_ASSOC),
+ BCMEVENT_NAME(WLC_E_ASSOC_IND),
+ BCMEVENT_NAME(WLC_E_REASSOC),
+ BCMEVENT_NAME(WLC_E_REASSOC_IND),
+ BCMEVENT_NAME(WLC_E_DISASSOC),
+ BCMEVENT_NAME(WLC_E_DISASSOC_IND),
+ BCMEVENT_NAME(WLC_E_QUIET_START),
+ BCMEVENT_NAME(WLC_E_QUIET_END),
+ BCMEVENT_NAME(WLC_E_BEACON_RX),
+ BCMEVENT_NAME(WLC_E_LINK),
+ BCMEVENT_NAME(WLC_E_MIC_ERROR),
+ BCMEVENT_NAME(WLC_E_NDIS_LINK),
+ BCMEVENT_NAME(WLC_E_ROAM),
+ BCMEVENT_NAME(WLC_E_TXFAIL),
+ BCMEVENT_NAME(WLC_E_PMKID_CACHE),
+ BCMEVENT_NAME(WLC_E_RETROGRADE_TSF),
+ BCMEVENT_NAME(WLC_E_PRUNE),
+ BCMEVENT_NAME(WLC_E_AUTOAUTH),
+ BCMEVENT_NAME(WLC_E_EAPOL_MSG),
+ BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+ BCMEVENT_NAME(WLC_E_ADDTS_IND),
+ BCMEVENT_NAME(WLC_E_DELTS_IND),
+ BCMEVENT_NAME(WLC_E_BCNSENT_IND),
+ BCMEVENT_NAME(WLC_E_BCNRX_MSG),
+ BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
+ BCMEVENT_NAME(WLC_E_ROAM_PREP),
+ BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+ BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
+ BCMEVENT_NAME(WLC_E_JOIN_START),
+ BCMEVENT_NAME(WLC_E_ROAM_START),
+ BCMEVENT_NAME(WLC_E_ASSOC_START),
+#ifdef EXT_STA
+ BCMEVENT_NAME(WLC_E_RESET_COMPLETE),
+ BCMEVENT_NAME(WLC_E_JOIN_START),
+ BCMEVENT_NAME(WLC_E_ROAM_START),
+ BCMEVENT_NAME(WLC_E_ASSOC_START),
+ BCMEVENT_NAME(WLC_E_ASSOC_RECREATED),
+ BCMEVENT_NAME(WLC_E_SPEEDY_RECREATE_FAIL),
+#endif /* EXT_STA */
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+ BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+ BCMEVENT_NAME(WLC_E_RADIO),
+ BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
+ BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
+ BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
+ BCMEVENT_NAME(WLC_E_PSK_SUP),
+ BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED),
+ BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME),
+ BCMEVENT_NAME(WLC_E_ICV_ERROR),
+ BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
+ BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
+ BCMEVENT_NAME(WLC_E_TRACE),
+ BCMEVENT_NAME(WLC_E_IF),
+#ifdef WLP2P
+ BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
+#endif
+ BCMEVENT_NAME(WLC_E_RSSI),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
+#if defined(NDIS)
+ BCMEVENT_NAME(WLC_E_PRE_ASSOC_IND),
+ BCMEVENT_NAME(WLC_E_PRE_REASSOC_IND),
+ BCMEVENT_NAME(WLC_E_CHANNEL_ADOPTED),
+ BCMEVENT_NAME(WLC_E_AP_STARTED),
+ BCMEVENT_NAME(WLC_E_DFS_AP_STOP),
+ BCMEVENT_NAME(WLC_E_DFS_AP_RESUME),
+ BCMEVENT_NAME(WLC_E_ASSOC_IND_NDIS),
+ BCMEVENT_NAME(WLC_E_REASSOC_IND_NDIS),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX_NDIS),
+ BCMEVENT_NAME(WLC_E_AUTH_REQ),
+ BCMEVENT_NAME(WLC_E_IBSS_COALESCE),
+#endif /* #if defined(NDIS) */
+
+#ifdef BCMWAPI_WAI
+ BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
+ BCMEVENT_NAME(WLC_E_WAI_MSG),
+#endif /* BCMWAPI_WAI */
+
+ BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
+#ifdef WLP2P
+ BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
+ BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
+#endif
+#ifdef PROP_TXSTATUS
+ BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
+#endif
+ BCMEVENT_NAME(WLC_E_WAKE_EVENT),
+ BCMEVENT_NAME(WLC_E_DCS_REQUEST),
+ BCMEVENT_NAME(WLC_E_RM_COMPLETE),
+ BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
+ BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
+ BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+#ifdef SOFTAP
+ BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
+#endif
+ BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
+ BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
+ BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
+#ifdef WLTDLS
+ BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT),
+#endif /* WLTDLS */
+ BCMEVENT_NAME(WLC_E_NATIVE),
+#ifdef WLPKTDLYSTAT
+ BCMEVENT_NAME(WLC_E_PKTDELAY_IND),
+#endif /* WLPKTDLYSTAT */
+ BCMEVENT_NAME(WLC_E_SERVICE_FOUND),
+ BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX),
+ BCMEVENT_NAME(WLC_E_GAS_COMPLETE),
+ BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE),
+ BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE),
+#ifdef WLWNM
+ BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
+#endif /* WLWNM */
+#if defined(WL_PROXDETECT) || defined(RTT_SUPPORT)
+ BCMEVENT_NAME(WLC_E_PROXD),
+#endif
+ BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
+ BCMEVENT_NAME(WLC_E_BSSID),
+#ifdef PROP_TXSTATUS
+ BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
+#endif
+ BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND),
+ BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
+#ifdef WLAIBSS
+ BCMEVENT_NAME(WLC_E_AIBSS_TXFAIL),
+#endif /* WLAIBSS */
+#ifdef GSCAN_SUPPORT
+ BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT),
+ BCMEVENT_NAME(WLC_E_PFN_SSID_EXT),
+#endif /* GSCAN_SUPPORT */
+#ifdef WLBSSLOAD_REPORT
+ BCMEVENT_NAME(WLC_E_BSS_LOAD),
+#endif
+#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
+ BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
+#endif
+#ifdef WLFBT
+ BCMEVENT_NAME(WLC_E_FBT),
+#endif /* WLFBT */
+ BCMEVENT_NAME(WLC_E_AUTHORIZED),
+ BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX),
+
+#ifdef WLAWDL
+ BCMEVENT_NAME(WLC_E_AWDL_AW),
+ BCMEVENT_NAME(WLC_E_AWDL_ROLE),
+ BCMEVENT_NAME(WLC_E_AWDL_EVENT),
+#endif /* WLAWDL */
+
+ BCMEVENT_NAME(WLC_E_CSA_START_IND),
+ BCMEVENT_NAME(WLC_E_CSA_DONE_IND),
+ BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND),
+ BCMEVENT_NAME(WLC_E_RMC_EVENT),
+ BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND),
+ BCMEVENT_NAME(WLC_E_ALLOW_CREDIT_BORROW),
+ BCMEVENT_NAME(WLC_E_MSCH),
+ BCMEVENT_NAME(WLC_E_ULP),
+ BCMEVENT_NAME(WLC_E_NAN),
+ BCMEVENT_NAME(WLC_E_PKT_FILTER),
+ BCMEVENT_NAME(WLC_E_DMA_TXFLUSH_COMPLETE),
+ BCMEVENT_NAME(WLC_E_PSK_AUTH),
+ BCMEVENT_NAME(WLC_E_SDB_TRANSITION),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_BACKOFF),
+ BCMEVENT_NAME(WLC_E_PFN_BSSID_SCAN_BACKOFF),
+ BCMEVENT_NAME(WLC_E_AGGR_EVENT),
+ BCMEVENT_NAME(WLC_E_TVPM_MITIGATION),
+ BCMEVENT_NAME(WLC_E_SCAN),
+ BCMEVENT_NAME(WLC_E_SLOTTED_BSS_PEER_OP),
+ BCMEVENT_NAME(WLC_E_PHY_CAL),
+#ifdef WL_NAN
+ BCMEVENT_NAME(WLC_E_NAN_CRITICAL),
+ BCMEVENT_NAME(WLC_E_NAN_NON_CRITICAL),
+ BCMEVENT_NAME(WLC_E_NAN),
+#endif /* WL_NAN */
+ BCMEVENT_NAME(WLC_E_RPSNOA),
+ BCMEVENT_NAME(WLC_E_WA_LQM),
+ BCMEVENT_NAME(WLC_E_OBSS_DETECTION),
+ BCMEVENT_NAME(WLC_E_SC_CHAN_QUAL),
+ BCMEVENT_NAME(WLC_E_DYNSAR),
+ BCMEVENT_NAME(WLC_E_ROAM_CACHE_UPDATE),
+ BCMEVENT_NAME(WLC_E_AP_BCN_DRIFT),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE_EXT),
+#ifdef WL_CLIENT_SAE
+ BCMEVENT_NAME(WLC_E_AUTH_START),
+#endif /* WL_CLIENT_SAE */
+#ifdef WL_TWT
+ BCMEVENT_NAME(WLC_E_TWT_SETUP),
+ BCMEVENT_NAME(WLC_E_TWT_TEARDOWN),
+ BCMEVENT_NAME(WLC_E_TWT_INFO_FRM)
+#endif /* WL_TWT */
+};
+
+const char *bcmevent_get_name(uint event_type)
+{
+ /* note: first coded this as a static const but some
+ * ROMs already have something called event_name so
+ * changed it so we don't have a variable for the
+ * 'unknown string
+ */
+ const char *event_name = NULL;
+
+ uint idx;
+ for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) {
+
+ if (bcmevent_names[idx].event == event_type) {
+ event_name = bcmevent_names[idx].name;
+ break;
+ }
+ }
+
+ /* if we find an event name in the array, return it.
+ * otherwise return unknown string.
+ */
+ return ((event_name) ? event_name : "Unknown Event");
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+ /* Event struct members passed from dongle to host are stored in network
+ * byte order. Convert all members to host-order.
+ */
+ evt->event_type = ntoh32(evt->event_type);
+ evt->flags = ntoh16(evt->flags);
+ evt->status = ntoh32(evt->status);
+ evt->reason = ntoh32(evt->reason);
+ evt->auth_type = ntoh32(evt->auth_type);
+ evt->datalen = ntoh32(evt->datalen);
+ evt->version = ntoh16(evt->version);
+}
+
+void
+wl_event_to_network_order(wl_event_msg_t * evt)
+{
+ /* Event struct members passed from dongle to host are stored in network
+ * byte order. Convert all members to host-order.
+ */
+ evt->event_type = hton32(evt->event_type);
+ evt->flags = hton16(evt->flags);
+ evt->status = hton32(evt->status);
+ evt->reason = hton32(evt->reason);
+ evt->auth_type = hton32(evt->auth_type);
+ evt->datalen = hton32(evt->datalen);
+ evt->version = hton16(evt->version);
+}
+
+/*
+ * Validate if the event is proper and if valid copy event header to event.
+ * If proper event pointer is passed, to just validate, pass NULL to event.
+ *
+ * Return values are
+ * BCME_OK - It is a BRCM event or BRCM dongle event
+ * BCME_NOTFOUND - Not BRCM, not an event, may be okay
+ * BCME_BADLEN - Bad length, should not process, just drop
+ */
int
is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype,
	bcm_event_msg_u_t *out_event)
{
	uint16 evlen = 0; /* length in bcmeth_hdr */
	uint16 subtype;
	uint16 usr_subtype;
	bcm_event_t *bcm_event;
	uint8 *pktend;	/* one past the last byte of the packet */
	uint8 *evend;	/* computed end of the bcmeth payload */
	int err = BCME_OK;
	uint32 data_len = 0; /* data length in bcm_event */

	pktend = (uint8 *)pktdata + pktlen;
	bcm_event = (bcm_event_t *)pktdata;

	/* only care about 16-bit subtype / length versions */
	if ((uint8 *)&bcm_event->bcm_hdr < pktend) {
		uint8 short_subtype = *(uint8 *)&bcm_event->bcm_hdr;
		if (!(short_subtype & 0x80)) {
			err = BCME_NOTFOUND;
			goto done;
		}
	}

	/* must have both ether_header and bcmeth_hdr */
	if (pktlen < OFFSETOF(bcm_event_t, event)) {
		err = BCME_BADLEN;
		goto done;
	}

	/* check length in bcmeth_hdr */

#ifdef BCMDONGLEHOST
	/* temporary - header length not always set properly. When the below
	 * !BCMDONGLEHOST is in all branches that use trunk DHD, the code
	 * under BCMDONGLEHOST can be removed.
	 */
	evlen = (uint16)(pktend - (uint8 *)&bcm_event->bcm_hdr.version);
#else
	evlen = ntoh16_ua((void *)&bcm_event->bcm_hdr.length);
#endif /* BCMDONGLEHOST */
	/* the declared (or derived) payload length must land exactly on the
	 * end of the packet -- anything else is a malformed frame
	 */
	evend = (uint8 *)&bcm_event->bcm_hdr.version + evlen;
	if (evend != pktend) {
		err = BCME_BADLEN;
		goto done;
	}

	/* match on subtype, oui and usr subtype for BRCM events */
	subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.subtype);
	if (subtype != BCMILCP_SUBTYPE_VENDOR_LONG) {
		err = BCME_NOTFOUND;
		goto done;
	}

	if (bcmp(BRCM_OUI, &bcm_event->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
		err = BCME_NOTFOUND;
		goto done;
	}

	/* if it is a bcm_event or bcm_dngl_event_t, validate it */
	usr_subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.usr_subtype);
	switch (usr_subtype) {
	case BCMILCP_BCM_SUBTYPE_EVENT:
		/* check that header length and pkt length are sufficient */
		if ((pktlen < sizeof(bcm_event_t)) ||
			(evend < ((uint8 *)bcm_event + sizeof(bcm_event_t)))) {
			err = BCME_BADLEN;
			goto done;
		}

		/* ensure data length in event is not beyond the packet. */
		data_len = ntoh32_ua((void *)&bcm_event->event.datalen);
		if ((sizeof(bcm_event_t) + data_len +
			BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) {
			err = BCME_BADLEN;
			goto done;
		}

		if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) {
			err = BCME_NOTFOUND;
			goto done;
		}

		if (out_event) {
			/* ensure BRCM event pkt aligned */
			memcpy(&out_event->event, &bcm_event->event, sizeof(wl_event_msg_t));
		}

		break;

	case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
#if defined(HEALTH_CHECK) || defined(DNGL_EVENT_SUPPORT)
		/* same checks as above, but against the dongle-event layout */
		if ((pktlen < sizeof(bcm_dngl_event_t)) ||
			(evend < ((uint8 *)bcm_event + sizeof(bcm_dngl_event_t)))) {
			err = BCME_BADLEN;
			goto done;
		}

		/* ensure data length in event is not beyond the packet. */
		data_len = ntoh16_ua((void *)&((bcm_dngl_event_t *)pktdata)->dngl_event.datalen);
		if ((sizeof(bcm_dngl_event_t) + data_len +
			BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) {
			err = BCME_BADLEN;
			goto done;
		}

		if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) {
			err = BCME_NOTFOUND;
			goto done;
		}

		if (out_event) {
			/* ensure BRCM dngl event pkt aligned */
			memcpy(&out_event->dngl_event, &((bcm_dngl_event_t *)pktdata)->dngl_event,
				sizeof(bcm_dngl_event_msg_t));
		}

		break;
#else
		err = BCME_UNSUPPORTED;
		break;
#endif /* HEALTH_CHECK || DNGL_EVENT_SUPPORT */

	default:
		err = BCME_NOTFOUND;
		goto done;
	}

	BCM_REFERENCE(data_len);
done:
	return err;
}
diff --git a/bcmdhd.101.10.361.x/bcminternal-android.mk b/bcmdhd.101.10.361.x/bcminternal-android.mk
new file mode 100755
index 0000000..4fecfad
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcminternal-android.mk
@@ -0,0 +1,88 @@
+#
+# Broadcom Proprietary and Confidential. Copyright (C) 2020,
+# All Rights Reserved.
+#
+# This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+# the contents of this file may not be disclosed to third parties,
+# copied or duplicated in any form, in whole or in part, without
+# the prior written permission of Broadcom.
+#
+#
+# <<Broadcom-WL-IPTag/Secret:>>
+
+# This file should be seen only by internal builds because it will
+# be mentioned only in internal filelists like brcm.flist.
+# See extended comment bcminternal.mk for details.
+
+BCMINTERNAL := 1
+
+BCMINTERNAL_DFLAGS += -DDHD_NO_MOG
+
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ # Enable Register access via dhd IOVAR
+ BCMINTERNAL_DFLAGS += -DDHD_PCIE_REG_ACCESS
+ # latency timestamping
+ BCMINTERNAL_DFLAGS += -DDHD_PKTTS
+ # Traffic Pattern Analysis on Socket Flow
+ BCMINTERNAL_DFLAGS += -DDHD_QOS_ON_SOCK_FLOW
+ # QoS unit testing support
+ BCMINTERNAL_DFLAGS += -DDHD_QOS_ON_SOCK_FLOW_UT
+ # Auto QOS
+ BCMINTERNAL_DFLAGS += -DWL_AUTO_QOS
+
+ ifneq ($(filter -DCUSTOMER_HW4, $(DHDCFLAGS)),)
+ # These will be moved to hw4 Makefile for 4389b0
+ BCMINTERNAL_DFLAGS += -DWBRC
+ BCMINTERNAL_DFLAGS += -DWLAN_ACCEL_BOOT
+ BCMINTERNAL_DFLAGS += -DDHD_HTPUT_TUNABLES
+ # BCMINTERNAL_DFLAGS += -DDHD_FIS_DUMP
+ # SCAN TYPES, if kernel < 4.17 ..back port support required
+ ifneq ($(CONFIG_CFG80211_SCANTYPE_BKPORT),)
+ DHDCFLAGS += -DWL_SCAN_TYPE
+ endif
+ # Jig builds
+ # No reset during dhd attach
+ BCMINTERNAL_DFLAGS += -DDHD_SKIP_DONGLE_RESET_IN_ATTACH
+ # Dongle Isolation will ensure no resets devreset ON/OFF
+ BCMINTERNAL_DFLAGS += -DDONGLE_ENABLE_ISOLATION
+ # Quiesce dongle using DB7 trap
+ BCMINTERNAL_DFLAGS += -DDHD_DONGLE_TRAP_IN_DETACH
  # Collect socram during dongle init failures for internal builds
+ BCMINTERNAL_DFLAGS += -DDEBUG_DNGL_INIT_FAIL
+ # Dongle reset during Wifi ON to keep in sane state
+ BCMINTERNAL_DFLAGS += -DFORCE_DONGLE_RESET_IN_DEVRESET_ON
+ # Perform Backplane Reset else FLR will happen
+ # BCMINTERNAL_DFLAGS += -DDHD_USE_BP_RESET_SS_CTRL
+ BCMINTERNAL_DFLAGS += -DWIFI_TURNOFF_DELAY=10
+
+ endif
+
+ # NCI_BUS support
+ BCMINTERNAL_DFLAGS += -DSOCI_NCI_BUS
+endif
+
+
+BCMINTERNAL_DFLAGS += -DDHD_BUS_MEM_ACCESS
+
+# Support multiple chips
+BCMINTERNAL_DFLAGS += -DSUPPORT_MULTIPLE_CHIPS
+
+# Support unreleased chips
+BCMINTERNAL_DFLAGS += -DUNRELEASEDCHIP
+
+# Collect socram if readshared fails
+BCMINTERNAL_DFLAGS += -DDEBUG_DNGL_INIT_FAIL
+
+# Force enable memdump value to DUMP_MEMFILE if it is disabled
+BCMINTERNAL_DFLAGS += -DDHD_INIT_DEFAULT_MEMDUMP
+
+ifneq ($(filter -DDHD_QOS_ON_SOCK_FLOW,$(BCMINTERNAL_DFLAGS)),)
+BCMINTERNAL_DHDOFILES += dhd_linux_sock_qos.o
+endif
+ifneq ($(filter -DSOCI_NCI_BUS,$(BCMINTERNAL_DFLAGS)),)
+BCMINTERNAL_DHDOFILES += nciutils.o
+endif
+ifneq ($(filter -DWBRC,$(BCMINTERNAL_DFLAGS)),)
+BCMINTERNAL_DHDOFILES += wb_regon_coordinator.o
+endif
+# vim: filetype=make shiftwidth=2
diff --git a/bcmdhd.101.10.361.x/bcminternal.mk b/bcmdhd.101.10.361.x/bcminternal.mk
new file mode 100755
index 0000000..eb94021
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcminternal.mk
@@ -0,0 +1,60 @@
+#
+# Broadcom Proprietary and Confidential. Copyright (C) 2020,
+# All Rights Reserved.
+#
+# This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+# the contents of this file may not be disclosed to third parties,
+# copied or duplicated in any form, in whole or in part, without
+# the prior written permission of Broadcom.
+#
+#
+# <<Broadcom-WL-IPTag/Secret:>>
+
+# This file should be seen only by internal builds because it will
+# be mentioned only in internal filelists like brcm.flist. The idea
+# is that it will be conditionally included by makefiles using the
+# "-include" syntax, with the result that internal builds will see
+# this file and set BCMINTERNAL which will eventually result in a
+# -DBCMINTERNAL option passed to the compiler along with possible
+# other effects. External builds will never see it and it will be
+# silently ignored.
+#
+# Any settings which should not be exposed to customers may be
+# placed here. For instance, if we were working on a super-secret
+# new feature in supersecret.c we could set a variable here like
+# BCMINTERNAL_OBJECTS := supersecret.o
+# and later say
+# OBJECTS += $(BCMINTERNAL_OBJECTS)
+# within the main makefile.
+#
+# The key point is that this file is never shipped to customers
+# because it's present only in internal filelists so anything
+# here is private.
+
+BCMINTERNAL := 1
+
+BCMINTERNAL_DFLAGS += -DBCMINTERNAL
+BCMINTERNAL_DFLAGS += -DDHD_NO_MOG
+
+# Support unreleased chips
+BCMINTERNAL_DFLAGS += -DUNRELEASEDCHIP
+
+ifneq ($(findstring -fwtrace,-$(TARGET)-),)
+ BCMINTERNAL_DFLAGS += -DDHD_FWTRACE
+ BCMINTERNAL_CFILES += dhd_fwtrace.c
+endif
+
+# support only for SDIO MFG Fedora builds
+ifneq ($(findstring -sdstd-,-$(TARGET)-),)
+ ifneq ($(findstring -mfgtest-,-$(TARGET)-),)
+ BCMINTERNAL_DFLAGS += -DDHD_SPROM
+ BCMINTERNAL_CFILES += bcmsrom.c bcmotp.c
+ endif
+endif
+
# Wrap TARGET with '-' on both sides, matching the other findstring checks
# above, so that a target name beginning with "pciefd" also matches.
ifneq ($(findstring -pciefd-,-$(TARGET)-),)
# NCI_BUS support
BCMINTERNAL_DFLAGS += -DSOCI_NCI_BUS -DBOOKER_NIC400_INF
BCMINTERNAL_CFILES += nciutils.c
endif
+# vim: filetype=make shiftwidth=2
diff --git a/bcmdhd.101.10.361.x/bcmsdh.c b/bcmdhd.101.10.361.x/bcmsdh.c
new file mode 100755
index 0000000..538f056
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh.c
@@ -0,0 +1,953 @@
+/*
+ * BCMSDH interface glue
+ * implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#if !defined(BCMDONGLEHOST)
+#include <bcmsrom.h>
+#endif /* !defined(BCMDONGLEHOST) */
+#include <osl.h>
+
+#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h> /* common SDIO/controller interface */
+#include <sbsdio.h> /* SDIO device core hardware definitions. */
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+
+#if defined (BT_OVER_SDIO)
+#include <dhd_bt_interface.h>
+#endif /* defined (BT_OVER_SDIO) */
+
#define SDIOH_API_ACCESS_RETRY_LIMIT 2 /* extra attempts for cfg-space access */
const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;

/* local copy of bcm sd handler; used as a fallback whenever callers pass
 * a NULL sdh handle
 */
bcmsdh_info_t * l_bcmsdh = NULL;

#if defined (BT_OVER_SDIO)
/* BT-over-SDIO shared-bus state: the BT function-3 device plus the
 * callbacks the BT stack registers via bcmsdh_btsdio_interface_init()
 */
struct sdio_func *func_f3 = NULL;
static f3intr_handler processf3intr = NULL;
static dhd_hang_notification process_dhd_hang_notification = NULL;
static dhd_hang_state_t g_dhd_hang_state = NO_HANG_STATE;
#endif /* defined (BT_OVER_SDIO) */

#if defined(NDIS) && (NDISVER < 0x0630)
extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
#endif
+
#if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN)
extern int
sdioh_enable_hw_oob_intr(void *sdioh, bool enable);

/* Toggle the hardware out-of-band (WL_HOST_WAKE) interrupt at the host
 * controller driver layer
 */
void
bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
{
	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
}
#endif
+
+#if defined (BT_OVER_SDIO)
+void bcmsdh_btsdio_process_hang_state(dhd_hang_state_t new_state)
+{
+ bool state_change = false;
+
+ BCMSDH_ERROR(("%s: DHD hang state changed - [%d] -> [%d]\n",
+ __FUNCTION__, g_dhd_hang_state, new_state));
+
+ if (g_dhd_hang_state == new_state)
+ return;
+
+ switch (g_dhd_hang_state) {
+ case NO_HANG_STATE:
+ if (HANG_START_STATE == new_state)
+ state_change = true;
+ break;
+
+ case HANG_START_STATE:
+ if (HANG_RECOVERY_STATE == new_state ||
+ NO_HANG_STATE == new_state)
+ state_change = true;
+
+ break;
+
+ case HANG_RECOVERY_STATE:
+ if (NO_HANG_STATE == new_state)
+ state_change = true;
+ break;
+
+ default:
+ BCMSDH_ERROR(("%s: Unhandled Hang state\n", __FUNCTION__));
+ break;
+ }
+
+ if (!state_change) {
+ BCMSDH_ERROR(("%s: Hang state cannot be changed\n", __FUNCTION__));
+ return;
+ }
+
+ g_dhd_hang_state = new_state;
+}
+
+void bcmsdh_btsdio_process_f3_intr(void)
+{
+ if (processf3intr && (g_dhd_hang_state == NO_HANG_STATE))
+ processf3intr(func_f3);
+}
+
+void bcmsdh_btsdio_process_dhd_hang_notification(bool wifi_recovery_completed)
+{
+ bcmsdh_btsdio_process_hang_state(HANG_START_STATE);
+
+ if (process_dhd_hang_notification)
+ process_dhd_hang_notification(func_f3, wifi_recovery_completed);
+
+ /* WiFi was off, so HANG_RECOVERY_STATE is not needed */
+ if (wifi_recovery_completed)
+ bcmsdh_btsdio_process_hang_state(NO_HANG_STATE);
+ else {
+ bcmsdh_btsdio_process_hang_state(HANG_RECOVERY_STATE);
+ }
+}
+
/* Called by the BT stack to hand over its F3 sdio_func and callbacks;
 * enables function 3 at the sdioh layer so BT traffic can flow over the
 * shared SDIO bus. Relies on the cached global handle l_bcmsdh (driver
 * must already be attached).
 */
void bcmsdh_btsdio_interface_init(struct sdio_func *func,
	f3intr_handler f3intr_fun, dhd_hang_notification hang_notification)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)l_bcmsdh;
	BCMSDH_INFO(("%s: func %p \n", __FUNCTION__, func));
	func_f3 = func;
	processf3intr = f3intr_fun;
	sdioh_sdmmc_card_enable_func_f3(bcmsdh->sdioh, func);
	process_dhd_hang_notification = hang_notification;

} EXPORT_SYMBOL(bcmsdh_btsdio_interface_init);
+#endif /* defined (BT_OVER_SDIO) */
+
/* Attach BCMSDH layer to SDIO Host Controller Driver
 *
 * @param osh    OSL Handle.
 * @param sdioh  Host-controller handle to wrap.
 * @param regsva Out parameter: receives the backplane enumeration base
 *               address (despite the name, not a virtual register mapping).
 *
 * @return bcmsdh_info_t Handle to BCMSDH context, or NULL if the context
 *         allocation fails. The handle is also cached in l_bcmsdh for
 *         callers that pass a NULL sdh later.
 */
bcmsdh_info_t *
bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva)
{
	bcmsdh_info_t *bcmsdh;

	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
		return NULL;
	}
	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
	bcmsdh->sdioh = sdioh;
	bcmsdh->osh = osh;
	bcmsdh->init_success = TRUE;
	*regsva = si_enum_base(0);

	bcmsdh_force_sbwad_calc(bcmsdh, FALSE);

	/* Report the BAR, to fix if needed */
	bcmsdh->sbwad = si_enum_base(0);

	/* save the handler locally */
	l_bcmsdh = bcmsdh;

	return bcmsdh;
}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (bcmsdh != NULL) {
+#if defined(NDIS) && (NDISVER < 0x0630)
+ if (bcmsdh->sdioh)
+ sdioh_detach(osh, bcmsdh->sdioh);
+#endif
+ MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+ }
+
+ l_bcmsdh = NULL;
+
+ return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, uint plen, void *arg, uint len, bool set)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ bool on;
+
+ ASSERT(bcmsdh);
+ status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+ if (SDIOH_API_SUCCESS(status))
+ return FALSE;
+ else
+ return on;
+}
+
/* Enable SDIO interrupts at the host controller.
 * @return 0 on success, BCME_ERROR otherwise.
 */
int
bcmsdh_intr_enable(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
#ifdef BCMSPI_ANDROID
	uint32 data;
#endif /* BCMSPI_ANDROID */
	ASSERT(bcmsdh);

	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
#ifdef BCMSPI_ANDROID
	/* gSPI: additionally set bits 0xE0E70000 in the config word at
	 * func 0 / addr 4 -- presumably the SPI interrupt-enable mask;
	 * confirm against the gSPI register map.
	 */
	data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL);
	data |= 0xE0E70000;
	bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL);
#endif /* BCMSPI_ANDROID */
	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
+
/* Disable SDIO interrupts at the host controller (mirror of
 * bcmsdh_intr_enable).
 * @return 0 on success, BCME_ERROR otherwise.
 */
int
bcmsdh_intr_disable(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
#ifdef BCMSPI_ANDROID
	uint32 data;
#endif /* BCMSPI_ANDROID */
	ASSERT(bcmsdh);

	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
#ifdef BCMSPI_ANDROID
	/* gSPI: clear the same 0xE0E70000 bits set by bcmsdh_intr_enable() */
	data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL);
	data &= ~0xE0E70000;
	bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL);
#endif /* BCMSPI_ANDROID */
	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
/* Debug helper: ask the host controller whether an SDIO interrupt is
 * currently pending.
 * NOTE(review): unlike most siblings, this does not fall back to l_bcmsdh
 * when sdh is NULL -- callers must pass a valid handle.
 */
bool
bcmsdh_intr_pending(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;

	ASSERT(sdh);
	return sdioh_interrupt_pending(bcmsdh->sdioh);
}
+#endif
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ ASSERT(sdh);
+
+ /* don't support yet */
+ return BCME_UNSUPPORTED;
+}
+
/**
 * Read a byte from SDIO Configuration Space
 * @param sdh SDIO Host context (NULL selects the cached global handle).
 * @param fnc_num Function number to read from.
 * @param addr Address to read from.
 * @param err Optional error return: 0 on success, BCME_SDIO_ERROR on failure.
 * @return value read from SDIO configuration space (0 on failure).
 */
uint8
bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	int32 retry = 0;
#endif
	uint8 data = 0;

	if (!bcmsdh)
		bcmsdh = l_bcmsdh;

	ASSERT(bcmsdh->init_success);

	/* note: the do/while braces below live inside the same
	 * SDIOH_API_ACCESS_RETRY_LIMIT guard -- without it this collapses
	 * to a single attempt
	 */
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	do {
		if (retry) /* wait for 1 ms till bus get settled down */
			OSL_DELAY(1000);
#endif
	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
#endif
	if (err)
		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);

	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
		fnc_num, addr, data));

	return data;
}
#ifdef BCMSDH_MODULE
EXPORT_SYMBOL(bcmsdh_cfg_read);
#endif
+
/* Write a byte to SDIO Configuration Space of function 'fnc_num', with
 * the same retry scheme as bcmsdh_cfg_read(). A NULL sdh selects the
 * cached global handle. 'err' (optional) receives 0 or BCME_SDIO_ERROR.
 */
void
bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	int32 retry = 0;
#endif

	if (!bcmsdh)
		bcmsdh = l_bcmsdh;

	ASSERT(bcmsdh->init_success);

#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	do {
		if (retry) /* wait for 1 ms till bus get settled down */
			OSL_DELAY(1000);
#endif
	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
#endif
	if (err)
		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;

	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
		fnc_num, addr, data));
}
#ifdef BCMSDH_MODULE
EXPORT_SYMBOL(bcmsdh_cfg_write);
#endif
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+ addr, data));
+}
+
int
bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
{
	/* Read the CIS for 'func' into 'cis' (at most SBSDIO_CIS_SIZE_LIMIT
	 * bytes). High bits of 'func' (above 0xf) select "ascii" mode, which
	 * reformats the raw bytes in place as hex text, 16 bytes per line.
	 *
	 * NOTE(review): the ascii flag tests ~0xf while the function number
	 * is masked with 0x7 -- bit 3 (0x8) is silently dropped; confirm
	 * this is intended.
	 * NOTE(review): in ascii mode the bytes are formatted even when the
	 * underlying sdioh_cis_read() failed.
	 */
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;

	uint8 *tmp_buf, *tmp_ptr;
	uint8 *ptr;
	bool ascii = func & ~0xf;
	func &= 0x7;

	if (!bcmsdh)
		bcmsdh = l_bcmsdh;

	ASSERT(bcmsdh->init_success);
	ASSERT(cis);
	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);

	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);

	if (ascii) {
		/* Move binary bits to tmp and format them into the provided buffer. */
		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
			return BCME_NOMEM;
		}
		bcopy(cis, tmp_buf, length);
		/* stop 4 bytes short of the end so each "%.2x " (3 chars + NUL)
		 * or "\n" write always fits within the caller's buffer
		 */
		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
			ptr += snprintf((char*)ptr, (cis + length - ptr - 4),
				"%.2x ", *tmp_ptr & 0xff);
			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
				ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n");
		}
		MFREE(bcmsdh->osh, tmp_buf, length);
	}

	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
+
+int
+bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint32 offset)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ func &= 0x7;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+ ASSERT(cisd);
+
+ status = sdioh_cisaddr_read(bcmsdh->sdioh, func, cisd, offset);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
int
bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
{
	/* Program the function-1 backplane address window (low/mid/high
	 * registers) so that 'address' is reachable through the SDIO func-1
	 * aperture. The window base is cached in bcmsdh->sbwad and only
	 * rewritten when it changes, unless force_set is TRUE.
	 *
	 * Returns 0 on success, or the bcmsdh_cfg_write error; on failure
	 * the cached window is invalidated so the next call retries.
	 */
	int err = 0;
	uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;

	if (bar0 != bcmsdh->sbwad || force_set) {
		/* write the three window bytes low -> mid -> high, stopping
		 * at the first failure
		 */
		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
			(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
		if (!err)
			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
				(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
		if (!err)
			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
				(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);

		if (!err)
			bcmsdh->sbwad = bar0;
		else
			/* invalidate cached window var */
			bcmsdh->sbwad = 0;

#ifdef BCMDBG
		if (err)
			BCMSDH_ERROR(("%s: error setting address window %08x\n",
				__FUNCTION__, address));
#endif /* BCMDBG */
	}

	return err;
}
+
+/*
+ * Read a 1/2/4-byte register at backplane address 'addr' through SDIO
+ * function 1. On success returns the value masked down to 'size' bytes;
+ * on any failure (window setup, bus error, or invalid 'size') sets
+ * bcmsdh->regfail and returns 0xFFFFFFFF.
+ */
+uint32
+bcmsdh_reg_read(void *sdh, uintptr addr, uint size)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 word = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x\n", __FUNCTION__, (unsigned int)addr));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;	/* fall back to the default instance */
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc)) {
+		bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc in dead lock
+		return 0xFFFFFFFF;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;	/* offset within the mapped window */
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;	/* request a 4-byte access */
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+		SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+	/* if ok, return appropriately masked word */
+	/* XXX Masking was put in for NDIS port, remove if not needed */
+	if (SDIOH_API_SUCCESS(status)) {
+		switch (size) {
+		case sizeof(uint8):
+			return (word & 0xff);
+		case sizeof(uint16):
+			return (word & 0xffff);
+		case sizeof(uint32):
+			return word;
+		default:
+			/* invalid size: flag failure and fall through to error return */
+			bcmsdh->regfail = TRUE;
+
+		}
+	}
+
+	/* otherwise, bad sdio access or invalid size */
+	BCMSDH_ERROR(("%s: error reading addr 0x%x size %d\n",
+		__FUNCTION__, (unsigned int)addr, size));
+	return 0xFFFFFFFF;
+}
+
+/*
+ * Write 'data' ('size' bytes) to backplane address 'addr' via SDIO
+ * function 1. Returns 0 on success.
+ * NOTE(review): on window-setup failure this returns the (negative) error
+ * code, while a failed data write returns 0xFFFFFFFF — callers should
+ * check bcmsdh_regfail() rather than interpret the return value.
+ */
+uint32
+bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	int err = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+		__FUNCTION__, (unsigned int)addr, size*8, data));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;	/* fall back to the default instance */
+
+	ASSERT(bcmsdh->init_success);
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc))) {
+		bcmsdh->regfail = TRUE; // terence 20130621:
+		return err;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;	/* offset within the mapped window */
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;	/* request a 4-byte access */
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+		addr, &data, size);
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	if (SDIOH_API_SUCCESS(status))
+		return 0;
+
+	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+		__FUNCTION__, data, (unsigned int)addr, size));
+	return 0xFFFFFFFF;
+}
+
+/* Report whether the most recent register access on 'sdh' failed. */
+bool
+bcmsdh_regfail(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return bcmsdh->regfail;
+}
+
+/*
+ * Read 'nbytes' from backplane address 'addr' on SDIO function 'fn' into
+ * 'buf' (or the packet chain 'pkt'). 'flags' selects fixed vs incrementing
+ * addressing (SDIO_REQ_FIXED) and 4- vs 2-byte access width
+ * (SDIO_REQ_4BYTE). Synchronous only: SDIO_REQ_ASYNC is rejected with
+ * BCME_UNSUPPORTED. Returns 0 or a BCME_ error code.
+ */
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes, void *pkt,
+	bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+		__FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;	/* offset within the mapped window */
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;	/* request 4-byte accesses */
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+		SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+/*
+ * Write 'nbytes' from 'buf' (or packet chain 'pkt') to backplane address
+ * 'addr' on SDIO function 'fn'. Flag handling mirrors bcmsdh_recv_buf().
+ * Synchronous only: SDIO_REQ_ASYNC is rejected with BCME_UNSUPPORTED.
+ * NOTE(review): failure here returns BCME_ERROR while bcmsdh_recv_buf
+ * returns BCME_SDIO_ERROR — confirm callers don't rely on the distinction.
+ */
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes, void *pkt,
+	bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+		__FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;	/* offset within the mapped window */
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;	/* request 4-byte accesses */
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+		SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+/*
+ * Raw 4-byte-wide, incrementing-address data transfer of 'nbytes' at
+ * backplane offset 'addr' on function 1 (rw: 0 = read, nonzero = write).
+ * The caller must already have programmed the backplane window: 'addr'
+ * must carry no window bits (asserted below).
+ */
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;	/* always 4-byte accesses */
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+		(rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+		addr, 4, nbytes, buf, NULL);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+/* Abort any transfer in progress on SDIO function 'fn'. */
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+/* Forward a bus start request (with its init 'stage') to the host layer. */
+int
+bcmsdh_start(void *sdh, int stage)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+/* Forward a bus stop request to the host layer. */
+int
+bcmsdh_stop(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_stop(bcmsdh->sdioh);
+}
+
+/* Wait until the host-layer bus lock is free (Linux only; no-op elsewhere). */
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+#ifdef LINUX
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_waitlockfree(bcmsdh->sdioh);
+#else
+	return 0;
+#endif
+}
+
+/*
+ * Determine the device's vendor/device ID, returned packed as
+ * (vendid << 16) | devid. For dongle hosts the ID is hard-coded to
+ * Broadcom; otherwise it is parsed out of the function-0 CIS, falling
+ * back to Broadcom with devid 0 when the CIS cannot be read, or to ~0
+ * when the CIS buffer cannot even be allocated.
+ */
+int
+bcmsdh_query_device(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+#if defined(BCMDONGLEHOST)
+	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+#else
+	uint8 *fn0cis[1];
+	int err;
+	char *vars;
+	uint varsz;
+	osl_t *osh = bcmsdh->osh;
+
+	bcmsdh->vendevid = ~(0);	/* "unknown" until proven otherwise */
+
+	if (!(fn0cis[0] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+		BCMSDH_ERROR(("%s: CIS malloc failed\n", __FUNCTION__));
+		return (bcmsdh->vendevid);
+	}
+
+	bzero(fn0cis[0], SBSDIO_CIS_SIZE_LIMIT);
+
+	if ((err = bcmsdh_cis_read(sdh, 0, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT))) {
+		BCMSDH_ERROR(("%s: CIS read err %d, report unknown BRCM device\n",
+			__FUNCTION__, err));
+		bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+		MFREE(osh, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT);
+		return (bcmsdh->vendevid);
+	}
+
+	/* NOTE(review): err is always 0 here (the error path returned above),
+	 * so this check is redundant but harmless.
+	 */
+	if (!err) {
+		if ((err = srom_parsecis(NULL, osh, fn0cis, 1, &vars, &varsz))) {
+			BCMSDH_ERROR(("%s: Error parsing CIS = %d\n", __FUNCTION__, err));
+		} else {
+			bcmsdh->vendevid = (getintvar(vars, "vendid") << 16) |
+				getintvar(vars, "devid");
+			MFREE(osh, vars, varsz);
+		}
+	}
+
+	MFREE(osh, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT);
+#endif /* BCMDONGLEHOST */
+	return (bcmsdh->vendevid);
+}
+
+/* Return the number of I/O functions reported by the SDIO device. */
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;	/* fall back to the default instance */
+
+	return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+/* Issue an SDIO reset through the host layer. */
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+/* XXX For use by NDIS port, remove if not needed. */
+/* Return the underlying sdioh handle. */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+	ASSERT(sdh);
+	return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+/* Only meaningful for gSPI builds (BCMSPI); returns 0 otherwise. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+#ifdef BCMSPI
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+	return sdioh_get_dstatus(sd);
+#else
+	return 0;
+#endif /* BCMSPI */
+}
+
+/* Return the currently cached backplane address window base. */
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;	/* fall back to the default instance */
+
+	return (bcmsdh->sbwad);
+}
+
+/* Control whether bcmsdhsdio_set_sbaddr_window() reprograms the backplane
+ * window on every access (force == TRUE) instead of trusting the cached
+ * window base.
+ */
+void
+bcmsdh_force_sbwad_calc(void *sdh, bool force)
+{
+	bcmsdh_info_t *info = (bcmsdh_info_t *)sdh;
+
+	if (info == NULL)
+		info = l_bcmsdh;
+
+	info->force_sbwad_calc = force;
+}
+
+/* Pass chip id/revision down to the host layer (gSPI builds only). */
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+#ifdef BCMSPI
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+	sdioh_chipinfo(sd, chip, chiprev);
+#else
+	return;
+#endif /* BCMSPI */
+}
+
+#ifdef BCMSPI
+/* Enable/disable gSPI dword mode in the host layer. */
+void
+bcmsdh_dwordmode(void *sdh, bool set)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+	sdioh_dwordmode(sd, set);
+	return;
+}
+#endif /* BCMSPI */
+
+/* Request host-controller sleep (enab TRUE) or wake; BCME_UNSUPPORTED
+ * unless the build enables SDIOH_SLEEP_ENABLED.
+ */
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_sleep(sd, enab);
+#else
+	return BCME_UNSUPPORTED;
+#endif
+}
+
+/* Initialize host-controller GPIO support. */
+int
+bcmsdh_gpio_init(void *sdh)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpio_init(sd);
+}
+
+/* Read the level of host-controller GPIO 'gpio'. */
+bool
+bcmsdh_gpioin(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioin(sd, gpio);
+}
+
+/* Configure host-controller GPIO 'gpio' as an output. */
+int
+bcmsdh_gpioouten(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioouten(sd, gpio);
+}
+
+/* Drive host-controller GPIO 'gpio' high (enab TRUE) or low. */
+int
+bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioout(sd, gpio, enab);
+}
+
+/* Set the host-layer operating mode; returns the host layer's result. */
+uint
+bcmsdh_set_mode(void *sdh, uint mode)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return (sdioh_set_mode(bcmsdh->sdioh, mode));
+}
+
+#ifdef PKT_STATICS
+/* Return the host layer's accumulated transfer-time statistic. */
+uint32
+bcmsdh_get_spend_time(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return (sdioh_get_spend_time(bcmsdh->sdioh));
+}
+#endif
diff --git a/bcmdhd.101.10.361.x/bcmsdh_linux.c b/bcmdhd.101.10.361.x/bcmsdh_linux.c
new file mode 100755
index 0000000..d297118
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh_linux.c
@@ -0,0 +1,594 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#if defined(CONFIG_ARCH_ODIN)
+#include <linux/platform_data/gpio-odin.h>
+#endif /* defined(CONFIG_ARCH_ODIN) */
+#include <dhd_linux.h>
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL};
+
+/* How the host is notified of device interrupts. */
+typedef enum {
+	DHD_INTR_INVALID = 0,
+	DHD_INTR_INBAND,	/* SDIO in-band (DAT[1]) interrupt */
+	DHD_INTR_HWOOB,		/* hardware out-of-band (WL_HOST_WAKE) */
+	DHD_INTR_SWOOB		/* software out-of-band */
+} DHD_HOST_INTR_TYPE;
+
+/* the BCMSDH module comprises the generic part (bcmsdh.c) and OS specific layer (e.g.
+ * bcmsdh_linux.c). Put all OS specific variables (e.g. irq number and flags) here rather
+ * than in the common structure bcmsdh_info. bcmsdh_info only keeps a handle (os_ctx) to this
+ * structure.
+ */
+typedef struct bcmsdh_os_info {
+	DHD_HOST_INTR_TYPE intr_type;
+	int oob_irq_num;	/* valid when hardware or software oob in use */
+	unsigned long oob_irq_flags;	/* valid when hardware or software oob in use */
+	bool oob_irq_registered;
+	bool oob_irq_enabled;
+	bool oob_irq_wake_enabled;
+	spinlock_t oob_irq_spinlock;
+	bcmsdh_cb_fn_t oob_irq_handler;
+	void *oob_irq_handler_context;
+	void *context;	/* context returned from upper layer */
+	void *sdioh;	/* handle to lower layer (sdioh) */
+	void *dev;	/* handle to the underlying device */
+	bool dev_wake_enabled;
+}  bcmsdh_os_info_t;
+
+/* debugging macros */
+/* NOTE(review): both branches expand identically, so the BCMDBG_ERR split
+ * is currently a no-op — confirm whether non-debug builds should be quieter.
+ */
+#ifdef BCMDBG_ERR
+#define SDLX_ERR(x) printf x
+#define SDLX_MSG(x) printf x
+#else
+#define SDLX_ERR(x) printf x
+#define SDLX_MSG(x) printf x
+#endif /* BCMDBG_ERR */
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ * Returns TRUE when the (vendor, device) PCI ID pair identifies a host
+ * controller this driver knows how to drive; FALSE otherwise. Which sets
+ * of controllers are checked depends on the build (BCM, STD, SPI flavors).
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+	/* Add other vendors and devices as required */
+#ifdef BCMINTERNAL
+#ifdef BCMSDIOH_BCM
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	if (device == BCM_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	if (device == BCM4710_DEVICE_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* For now still accept the old devid */
+	if (device == 0x4380 && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+#endif /* BCMSDIOH_BCM */
+#endif /* BCMINTERNAL */
+
+#ifdef BCMSDIOH_STD
+	/* Check for Arasan host controller */
+	if (vendor == VENDOR_SI_IMAGE) {
+		return (TRUE);
+	}
+	/* Check for BRCM 27XX Standard host controller */
+	if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for BRCM Standard host controller */
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for TI PCIxx21 Standard host controller */
+	if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	/* Ricoh R5C822 Standard SDIO Host */
+	if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+		return (TRUE);
+	}
+	/* JMicron Standard SDIO Host */
+	if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+		return (TRUE);
+	}
+
+#ifdef BCMINTERNAL
+	/* Check for Jinvani (C-Guys) host controller */
+	if (device == JINVANI_SDIOH_ID && vendor == VENDOR_JINVANI) {
+		return (TRUE);
+	}
+#endif /* BCMINTERNAL */
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+	/* This is the PciSpiHost. */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#ifdef BCMINTERNAL
+	/* This is the SPI Host for QT. */
+	if (device == BCM_SPIH_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found SPI Host Controller\n");
+		return (TRUE);
+	}
+#endif /* BCMINTERNAL */
+#endif /* BCMSDIOH_SPI */
+
+#ifdef BCMINTERNAL
+	/*
+	 * XXX - This is a hack to get the GPL SdioLinux driver to load on Arasan/x86
+	 * This is accomplished by installing a PciSpiHost into the system alongside the
+	 * Arasan controller. The PciSpiHost is just used to get BCMSDH loaded.
+	 */
+#ifdef BCMSDH_FD
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found SdioLinux Host Controller\n");
+		return (TRUE);
+	}
+#endif /* BCMSDH_FD */
+#endif /* BCMINTERNAL */
+	return (FALSE);
+}
+
+/*
+ * OS-layer probe: attach the generic bcmsdh instance, allocate the Linux
+ * OS-private state, fetch OOB interrupt parameters when configured, read
+ * the vendor/device ID from the CIS, and hand the device to the client
+ * driver registered via bcmsdh_register(). Returns the bcmsdh handle, or
+ * NULL on failure with all partially-initialized state released.
+ */
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num)
+{
+	ulong regs;
+	bcmsdh_info_t *bcmsdh;
+	uint32 vendevid;
+	bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
+
+	/* was '®s' — mis-encoded HTML entity for '&regs'; would not compile */
+	bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+	if (bcmsdh == NULL) {
+		SDLX_ERR(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+	if (bcmsdh_osinfo == NULL) {
+		SDLX_ERR(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+		goto err;
+	}
+	bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	bcmsdh->os_cxt = bcmsdh_osinfo;
+	bcmsdh_osinfo->sdioh = sdioh;
+	bcmsdh_osinfo->dev = dev;
+	osl_set_bus_handle(osh, bcmsdh);
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dev && device_init_wakeup(dev, true) == 0)
+		bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+	/* Get customer specific OOB IRQ parametres: IRQ number as IRQ type */
+	bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
+		&bcmsdh_osinfo->oob_irq_flags);
+	if (bcmsdh_osinfo->oob_irq_num < 0) {
+		SDLX_ERR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(bcmsdh);
+	/* try to attach to the target device */
+	bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+		slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+	if (bcmsdh_osinfo->context == NULL) {
+		SDLX_ERR(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return bcmsdh;
+
+	/* error handling: unwind in reverse order of acquisition */
+err:
+	if (bcmsdh != NULL)
+		bcmsdh_detach(osh, bcmsdh);
+	if (bcmsdh_osinfo != NULL)
+		MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	return NULL;
+}
+
+/*
+ * OS-layer teardown: disable device wakeup, detach the client driver,
+ * free the OS-private state and the generic bcmsdh instance.
+ * Counterpart of bcmsdh_probe(); always returns 0.
+ */
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (bcmsdh_osinfo->dev)
+		device_init_wakeup(bcmsdh_osinfo->dev, false);
+	bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+	drvinfo.remove(bcmsdh_osinfo->context);
+	MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+	bcmsdh_detach(bcmsdh->osh, bcmsdh);
+
+	return 0;
+}
+
+#ifdef DHD_WAKE_STATUS
+/* Return the cumulative count of OOB wake interrupts observed. */
+int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh)
+{
+	return bcmsdh->total_wake_count;
+}
+
+/* Atomically exchange the per-packet wake flag with 'flag' and accumulate
+ * it into total_wake_count; returns the previous flag value. Only does
+ * anything for OOB-interrupt builds (the spinlock lives in os_cxt).
+ */
+int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	unsigned long flags;
+#endif
+	int ret = 0;
+
+#if defined(OOB_INTR_ONLY)
+	spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+
+	ret = bcmsdh->pkt_wake;
+	bcmsdh->total_wake_count += flag;
+	bcmsdh->pkt_wake = flag;
+
+	spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+#endif
+	return ret;
+}
+#endif /* DHD_WAKE_STATUS */
+
+/* Forward a bus suspend request to the registered client driver.
+ * Returns -EBUSY when the client refuses to suspend, 0 otherwise.
+ */
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.suspend == NULL)
+		return 0;
+	if (drvinfo.suspend(osinfo->context))
+		return -EBUSY;
+	return 0;
+}
+
+/* Forward a bus resume request to the registered client driver.
+ * Returns the client's result, or 0 when no resume hook is registered.
+ */
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *osinfo = bcmsdh->os_cxt;
+
+	return drvinfo.resume ? drvinfo.resume(osinfo->context) : 0;
+}
+
+extern int bcmsdh_register_client_driver(void);
+extern void bcmsdh_unregister_client_driver(void);
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+/* Register 'semaphore' to be signalled when the SDIO function appears. */
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+	return sdio_func_reg_notify(semaphore);
+}
+
+/* Cancel the notification registered by bcmsdh_reg_sdio_notify(). */
+void bcmsdh_unreg_sdio_notify(void)
+{
+	sdio_func_unreg_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+/* Save the client driver's callbacks into the module-wide drvinfo and
+ * register the SDIO client driver with the host stack.
+ * Returns 0 on success, otherwise the registration error code.
+ */
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+	int error;
+
+	drvinfo = *driver;	/* must be in place before any probe fires */
+	SDLX_MSG(("%s: register client driver\n", __FUNCTION__));
+
+	error = bcmsdh_register_client_driver();
+	if (error != 0)
+		SDLX_ERR(("%s: failed %d\n", __FUNCTION__, error));
+
+	return error;
+}
+
+/* Unregister the SDIO client driver from the host stack. */
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	/* ancient kernels: skip if the PCI driver was never registered */
+	if (bcmsdh_pci_driver.node.next == NULL)
+		return;
+#endif
+
+	bcmsdh_unregister_client_driver();
+}
+
+/* Keep the underlying device awake (pm_stay_awake); no-op on wakelock or
+ * pre-2.6.37 builds.
+ */
+void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_stay_awake(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+/* Release the wake reference taken by bcmsdh_dev_pm_stay_awake(). */
+void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_relax(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+/* Report whether device wakeup was successfully enabled at probe time. */
+bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	return bcmsdh_osinfo->dev_wake_enabled;
+}
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+/* Return the out-of-band interrupt number obtained at probe time. */
+int bcmsdh_get_oob_intr_num(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	return bcmsdh_osinfo->oob_irq_num;
+}
+
+/* Enable or disable the OOB interrupt line. The enabled state is tracked
+ * under the oob_irq_spinlock so enable/disable calls stay balanced even
+ * when racing with the ISR.
+ */
+void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable)
+{
+	unsigned long flags;
+	bcmsdh_os_info_t *bcmsdh_osinfo;
+
+	if (!bcmsdh)
+		return;
+
+	bcmsdh_osinfo = bcmsdh->os_cxt;
+	spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+	if (bcmsdh_osinfo->oob_irq_enabled != enable) {
+		if (enable)
+			enable_irq(bcmsdh_osinfo->oob_irq_num);
+		else
+			/* nosync: safe to call from the IRQ handler itself */
+			disable_irq_nosync(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = enable;
+	}
+	spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+}
+
+#ifdef ENABLE_WAKEUP_PKT_DUMP
+extern volatile bool dhd_mmc_suspend;
+extern volatile bool dhd_mmc_wake;
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+
+/* Out-of-band interrupt handler: mask the OOB line (re-enabled later by
+ * the DPC via bcmsdh_oob_intr_set) and invoke the registered callback.
+ */
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#ifndef BCMSPI_ANDROID
+	bcmsdh_oob_intr_set(bcmsdh, FALSE);
+#endif /* !BCMSPI_ANDROID */
+	bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context);
+
+#ifdef ENABLE_WAKEUP_PKT_DUMP
+	/* record that this interrupt arrived while the MMC bus was suspended */
+	if (dhd_mmc_suspend) {
+		dhd_mmc_wake = TRUE;
+	}
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Register 'oob_irq_handler' on the OOB interrupt line obtained at probe
+ * time and mark the IRQ as a wakeup source (unless DISABLE_WOWLAN).
+ * State flags are set optimistically before request_irq() and rolled back
+ * on failure. Returns 0, -EBUSY if already registered, or the
+ * request_irq() error.
+ */
+int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_ERR(("%s: irq is already registered\n", __FUNCTION__));
+		return -EBUSY;
+	}
+#ifdef HW_OOB
+	SDLX_MSG(("%s: HW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
+		(int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+#else
+	SDLX_MSG(("%s: SW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
+		(int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+#endif
+	bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
+	bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
+	bcmsdh_osinfo->oob_irq_enabled = TRUE;
+	bcmsdh_osinfo->oob_irq_registered = TRUE;
+#if defined(CONFIG_ARCH_ODIN)
+	err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#else
+	err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#endif /* defined(CONFIG_ARCH_ODIN) */
+	if (err) {
+		SDLX_ERR(("%s: request_irq failed with %d\n", __FUNCTION__, err));
+		/* roll back the optimistic state set above */
+		bcmsdh_osinfo->oob_irq_enabled = FALSE;
+		bcmsdh_osinfo->oob_irq_registered = FALSE;
+		return err;
+	}
+
+#if defined(DISABLE_WOWLAN)
+	SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__));
+	bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+#else
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+	if (device_may_wakeup(bcmsdh_osinfo->dev)) {
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+		err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+		if (err)
+			SDLX_ERR(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err));
+		else
+			bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+	}
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+#endif
+
+	return 0;
+}
+
+/*
+ * Undo bcmsdh_oob_intr_register(): drop the wakeup-source setting,
+ * disable the line if still enabled, and free the IRQ. Safe to call when
+ * nothing is registered (logs and returns).
+ */
+void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+	if (!bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__));
+		return;
+	}
+	if (bcmsdh_osinfo->oob_irq_wake_enabled) {
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+		if (device_may_wakeup(bcmsdh_osinfo->dev)) {
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+			err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+			if (!err)
+				bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+		}
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+	}
+	if (bcmsdh_osinfo->oob_irq_enabled) {
+		disable_irq(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = FALSE;
+	}
+	free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
+	bcmsdh_osinfo->oob_irq_registered = FALSE;
+}
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+
+/* Module parameters specific to each host-controller driver */
+/* XXX Need to move these to where they really belong! */
+
+extern uint sd_msglevel;	/* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power;	/* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock;	/* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor;	/* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode;	/* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok;	/* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+/* module_param type must match the variable's declared type, otherwise the
+ * kernel's __param_check() trips: these two are uint (were registered as
+ * 'int'), and sd_delay_value below is int (was registered as 'uint').
+ */
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, uint, 0);
+
+extern uint sd_f1_blocksize;
+module_param(sd_f1_blocksize, uint, 0);
+
+#ifdef BCMSDIOH_STD
+extern int sd_uhsimode;
+module_param(sd_uhsimode, int, 0);
+extern uint sd_tuning_period;
+module_param(sd_tuning_period, uint, 0);
+extern int sd_delay_value;
+module_param(sd_delay_value, int, 0);
+
+/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */
+extern char dhd_sdiod_uhsi_ds_override[2];
+module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0);
+
+#endif
+
+#ifdef BCMSDH_MODULE
+/* Public BCMSDH API, exported only when built as a standalone module. */
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+#if defined (BT_OVER_SDIO)
+EXPORT_SYMBOL(bcmsdh_btsdio_interface_init);
+#endif /* defined (BT_OVER_SDIO) */
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c b/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c
new file mode 100755
index 0000000..596c02f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c
@@ -0,0 +1,2004 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+/*
+ * Compatibility shims: the mmc_host_clk_hold/release gating API only exists
+ * in kernels 3.3 .. 4.3, so provide no-op stand-ins outside that range.
+ * mmc_host_clk_rate falls back to the clock currently programmed in ios.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+/* No clock gating on these kernels: holding the host clock is a no-op */
+static inline void
+mmc_host_clk_hold(struct mmc_host *host)
+{
+	BCM_REFERENCE(host);
+	return;
+}
+
+/* Counterpart no-op to mmc_host_clk_hold() */
+static inline void
+mmc_host_clk_release(struct mmc_host *host)
+{
+	BCM_REFERENCE(host);
+	return;
+}
+
+/* Report the bus clock as whatever was last programmed via ios */
+static inline unsigned int
+mmc_host_clk_rate(struct mmc_host *host)
+{
+	return host->ios.clock;
+}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0) */
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET)
+extern int mmc_sw_reset(struct mmc_host *host);
+#else
+extern int sdio_reset_comm(struct mmc_card *card);
+#endif
+#endif
+#ifdef GLOBAL_SDMMC_INSTANCE
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+#endif
+
+/* F2 (data) block size; platforms may override via CUSTOM_SDIO_F2_BLKSIZE */
+#define DEFAULT_SDIO_F2_BLKSIZE 512
+#ifndef CUSTOM_SDIO_F2_BLKSIZE
+#define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
+#endif
+
+/* F1 (register access) block size; overridable via CUSTOM_SDIO_F1_BLKSIZE */
+#define DEFAULT_SDIO_F1_BLKSIZE 64
+#ifndef CUSTOM_SDIO_F1_BLKSIZE
+#define CUSTOM_SDIO_F1_BLKSIZE DEFAULT_SDIO_F1_BLKSIZE
+#endif
+
+/* Max block count for a single CMD53 (IO_RW_EXTENDED) transfer */
+#define MAX_IO_RW_EXTENDED_BLK 511
+
+uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
+uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
+uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE;
+
+#if defined (BT_OVER_SDIO)
+/* Block size for the BT function (F3) when BT shares the SDIO bus */
+uint sd_f3_blocksize = 64;
+#endif /* defined (BT_OVER_SDIO) */
+
+uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
+uint sd_msglevel = SDH_ERROR_VAL;
+uint sd_use_dma = TRUE;
+
+/* Receive-chaining (glomming of RX packets) disabled unless overridden */
+#ifndef CUSTOM_RXCHAIN
+#define CUSTOM_RXCHAIN 0
+#endif
+
+/* Wait queues used to park requests while the host is in PM suspend */
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+/* DMA buffers must be 4-byte aligned */
+#define DMA_ALIGN_MASK 0x03
+/* How many times to retry a CMD52 abort before giving up */
+#define MMC_SDIO_ABORT_RETRY_LIMIT 5
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+#ifdef NOTYET
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data);
+#endif /* NOTYET */
+
+#if defined (BT_OVER_SDIO)
+/*
+ * Record the BT function (F3) device so later F3 enable/disable requests
+ * through sdioh_request_byte() can reach it. Called by the BT stack glue.
+ */
+extern
+void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
+{
+	sd->func[3] = func;
+	sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3]));
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
+uint sdmmc_get_clock_rate(sdioh_info_t *sd);
+void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
+
+/*
+ * Read the card's common CIS pointer and each function's CIS pointer, then
+ * enable Function 1.
+ *
+ * Fix: the original duplicated the func_cis_ptr[0] assignment and its log
+ * line after the loop; the redundant copy is removed here.
+ *
+ * Returns FALSE unconditionally (historical convention for this helper);
+ * an F1 enable failure is logged but not propagated to the caller.
+ */
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+	int err_ret;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+			__FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	/* Enable Function 1 */
+	sdio_claim_host(sd->func[1]);
+	err_ret = sdio_enable_func(sd->func[1]);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x\n", err_ret));
+	}
+
+	return FALSE;
+}
+
+/*
+ * Public entry points & extern's
+ */
+/*
+ * Allocate and initialise the sdioh state for a probed SDIO card: build the
+ * func[0..2] table, set F1/F2 block sizes, record the bus clock rate and
+ * enable F1 via sdioh_sdmmc_card_enablefuncs().
+ * Returns the new sdioh_info_t, or NULL on allocation/blocksize failure.
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, struct sdio_func *func)
+{
+	sdioh_info_t *sd = NULL;
+	int err_ret;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (func == NULL) {
+		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	/* The mmc core has no struct sdio_func for F0, so fake one locally */
+	sd->fake_func0.num = 0;
+	sd->fake_func0.card = func->card;
+	sd->func[0] = &sd->fake_func0;
+#ifdef GLOBAL_SDMMC_INSTANCE
+	if (func->num == 2)
+		sd->func[1] = gInstance->func[1];
+#else
+	sd->func[1] = func->card->sdio_func[0];
+#endif
+	sd->func[2] = func->card->sdio_func[1];
+#ifdef GLOBAL_SDMMC_INSTANCE
+	sd->func[func->num] = func;
+#endif
+
+#if defined (BT_OVER_SDIO)
+	/* F3 (BT) is registered later via sdioh_sdmmc_card_enable_func_f3() */
+	sd->func[3] = NULL;
+#endif /* defined (BT_OVER_SDIO) */
+
+	sd->num_funcs = 2;
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->client_block_size[0] = 64;
+	sd->use_rxchain = CUSTOM_RXCHAIN;
+	if (sd->func[1] == NULL || sd->func[2] == NULL) {
+		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
+		goto fail;
+	}
+	sdio_set_drvdata(sd->func[1], sd);
+
+	sdio_claim_host(sd->func[1]);
+	sd->client_block_size[1] = sd_f1_blocksize;
+	err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
+		goto fail;
+	}
+
+	sdio_claim_host(sd->func[2]);
+	/* BCM43362/BCM4330 cannot handle an F2 block size larger than 128 */
+	if ((func->device == BCM43362_CHIP_ID || func->device == BCM4330_CHIP_ID) &&
+		sd_f2_blocksize > 128)
+		sd_f2_blocksize = 128;
+	sd->client_block_size[2] = sd_f2_blocksize;
+	printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
+	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+	sdio_release_host(sd->func[2]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
+			sd_f2_blocksize, err_ret));
+		goto fail;
+	}
+
+	sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
+	printf("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate);
+	sdioh_sdmmc_card_enablefuncs(sd);
+#if !defined(OOB_INTR_ONLY)
+	mutex_init(&sd->claim_host_mutex); // terence 20140926: fix for claim host issue
+#endif
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+
+fail:
+	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	return NULL;
+}
+
+/*
+ * Tear down the sdioh state created by sdioh_attach(): disable F2 then F1
+ * (reverse of the attach order) and free the state block.
+ * A NULL sd is tolerated; always reports success.
+ */
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	int fn;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd == NULL)
+		return SDIOH_API_RC_SUCCESS;
+
+	/* Disable F2 first, then F1, clearing each slot as we go */
+	for (fn = 2; fn >= 1; fn--) {
+		if (sd->func[fn]) {
+			sdio_claim_host(sd->func[fn]);
+			sdio_disable_func(sd->func[fn]);
+			sdio_release_host(sd->func[fn]);
+		}
+		sd->func[fn] = NULL;
+	}
+
+	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+/*
+ * Enable F1/F2 (and F3 for BT builds) interrupts in the CCCR IEN register
+ * while clearing the master-enable bit; used for HW out-of-band interrupt
+ * setups where the in-band master interrupt must stay off.
+ */
+extern SDIOH_API_RC
+sdioh_enable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	/* Enable F1 and F2 interrupts, clear master enable */
+	reg &= ~INTR_CTL_MASTER_EN;
+	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#if defined (BT_OVER_SDIO)
+	reg |= (INTR_CTL_FUNC3_EN);
+#endif /* defined (BT_OVER_SDIO) */
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/*
+ * Disable F1/F2 (and F3 for BT builds) interrupts in the CCCR IEN register;
+ * if no function bits remain set, the whole register (master enable
+ * included) is zeroed. Counterpart of sdioh_enable_func_intr().
+ */
+extern SDIOH_API_RC
+sdioh_disable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#if defined(BT_OVER_SDIO)
+	reg &= ~INTR_CTL_FUNC3_EN;
+#endif
+	/* Disable master interrupt with the last function interrupt */
+	if (!(reg & 0xFE))
+		reg = 0;
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we recieve client interrupt */
+/*
+ * Register the bus-layer interrupt callback. For in-band interrupts the
+ * callback is stored and the mmc-core IRQ handlers are claimed for F2 then
+ * F1; for HW out-of-band builds only the CCCR function-interrupt enables
+ * are programmed (the OOB GPIO path delivers the actual IRQ).
+ */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (sd->func[2]) {
+		sdio_claim_host(sd->func[2]);
+		sdio_claim_irq(sd->func[2], IRQHandlerF2);
+		sdio_release_host(sd->func[2]);
+	}
+
+	if (sd->func[1]) {
+		sdio_claim_host(sd->func[1]);
+		sdio_claim_irq(sd->func[1], IRQHandler);
+		sdio_release_host(sd->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/*
+ * Undo sdioh_interrupt_register(): release the claimed mmc-core IRQs (F1
+ * then F2) and clear the stored callback; for HW OOB builds, disable the
+ * CCCR function interrupts only when firmware was loaded at driver load.
+ */
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (sd->func[1]) {
+		/* register and unmask irq */
+		sdio_claim_host(sd->func[1]);
+		sdio_release_irq(sd->func[1]);
+		sdio_release_host(sd->func[1]);
+	}
+
+	if (sd->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(sd->func[2]);
+		sdio_release_irq(sd->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(sd->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	if (dhd_download_fw_on_driverload)
+		sdioh_disable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Report whether client interrupts are currently enabled via *onoff. */
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+/* Pending-interrupt query is not implemented for the sdmmc backend;
+ * always reports "none pending". Debug builds only.
+ */
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+/* Number of I/O functions discovered at attach time (set to 2 in
+ * sdioh_attach()).
+ */
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table: ids for the "sd_*" tunables handled by sdioh_iovar_op() */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+/* Name/id/type table consumed by bcm_iovar_lookup()/bcm_iovar_lencheck() */
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+	{"sd_dma", IOV_DMA, 0, 0, IOVT_BOOL, 0 },
+	{"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
+	{"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
+#ifdef BCMINTERNAL
+	{"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+	{"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+#endif /* BCMINTERNAL */
+	{"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
+	{"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
+	{"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
+	{"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
+	{"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0 },
+	{"sd_rxchain", IOV_RXCHAIN, 0, 0, IOVT_BOOL, 0 },
+#ifdef BCMDBG
+	{"sd_hciregs", IOV_HCIREGS, 0, 0, IOVT_BUFFER, 0 },
+#endif
+	{NULL, 0, 0, 0, 0, 0 }
+};
+
+/*
+ * Get or set one of the "sd_*" iovars listed in sdioh_iovars. 'set' selects
+ * direction; get results are copied into 'arg'. Returns 0 or a BCME_ error.
+ */
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+	void *params, int plen, void *arg, uint len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	uint val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* XXX Copied from dhd, copied from wl; certainly overkill here? */
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+	BCM_REFERENCE(bool_val);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode with DMA */
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		/* int_val carries the function number for the get */
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		/* Encoding: (func << 16) | blocksize; 0 means "use max" */
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		/* XXX These hardcoded sizes are a hack, remove after proper CIS parsing. */
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		si->client_block_size[func] = blksize;
+
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+		if (si->func[func] == NULL) {
+			sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+			bcmerror = BCME_NORESOURCE;
+			break;
+		}
+		sdio_claim_host(si->func[func]);
+		bcmerror = sdio_set_block_size(si->func[func], blksize);
+		if (bcmerror)
+			sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
+				__FUNCTION__, func, blksize, bcmerror));
+		sdio_release_host(si->func[func]);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+		break;
+	}
+
+	case IOV_GVAL(IOV_RXCHAIN):
+		int_val = (int32)si->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		/* set the clock to divisor, if value is non-zero & power of 2 */
+		if (int_val && !(int_val & (int_val - 1))) {
+			sd_divisor = int_val;
+			sdmmc_set_clock_divisor(si, sd_divisor);
+		} else {
+			DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n",
+				__FUNCTION__));
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		/* not tracked by this backend; always reports 0 */
+		int_val = (int32)0;
+		bcopy(&int_val, arg, val_size);
+		break;
+#ifdef BCMINTERNAL
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		/* XXX Should copy for alignment reasons */
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+			(sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+			sd_ptr->offset));
+		if (sd_ptr->offset & 1)
+			int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+		else if (sd_ptr->offset & 2)
+			int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+		else
+			int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		/* XXX Should copy for alignment reasons */
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+			(sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+			sd_ptr->offset));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		/* XXX Should copy for alignment reasons */
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = 0;
+
+		if ((uint)sd_ptr->func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		/* XXX Should copy for alignment reasons */
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if ((uint)sd_ptr->func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+#endif /* BCMINTERNAL */
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	/* XXX Remove protective lock after clients all clean... */
+	return bcmerror;
+}
+
+#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
+/*
+ * XXX dhd -i eth0 sd_devreg 0 0xf2 0x3
+ */
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#include <linux/amlogic/aml_gpio_consumer.h>
+extern int wifi_irq_trigger_level(void);
+#endif
+/*
+ * Program the Broadcom SEPINT CCCR register to route the chip interrupt to
+ * the WL_HOST_WAKE GPIO (HW out-of-band). Polarity follows the platform's
+ * configured IRQ trigger level; disable writes only the ACT_HI bit.
+ */
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable) {
+		/* active-low vs active-high chosen from the board's GPIO config */
+		if (wifi_irq_trigger_level() == GPIO_IRQ_LOW)
+			data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+		else
+			data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+	}
+	else
+		data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Byte read of a config register; sdioh_request_byte() handles locking. */
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	return sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+}
+
+/* Byte write of a config register; sdioh_request_byte() handles locking. */
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	return sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+}
+
+/*
+ * Read a 24-bit little-endian CIS pointer starting at regaddr (one CMD52
+ * per byte) and return the valid lower 17 bits.
+ *
+ * Fix: the address-of expression "&regdata" had been mangled into the
+ * mojibake "(R)data" (HTML entity &reg; rendered as the registered-sign
+ * character), which does not compile; restored to &regdata.
+ */
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+/*
+ * Copy 'length' bytes of the CIS for 'func' into cisd, one CMD52 read per
+ * byte, starting at the func_cis_ptr cached by card_enablefuncs().
+ * NOTE(review): the func_cis_ptr log below uses sd_err for what looks like
+ * an info-level message -- presumably intentional for field debugging.
+ */
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset = sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/*
+ * Read a single CIS byte for 'func' at the given offset from its cached
+ * CIS pointer; single-byte variant of sdioh_cis_read().
+ */
+extern SDIOH_API_RC
+sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset)
+{
+	uint32 foo;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (sdioh_sdmmc_card_regread (sd, 0, sd->func_cis_ptr[func]+offset, 1, &foo) < 0) {
+		sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	*cisd = (uint8)(foo & 0xff);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/*
+ * Single-byte CMD52 access. rw non-zero means write. F0 writes are only
+ * allowed for a few registers (F2/F3 enable via IOEN, abort via IOABORT,
+ * vendor range >= 0xF0); other F0 writes are rejected with a log.
+ * NOTE(review): sdio_abort_retry is declared only under MMC_SDIO_ABORT but
+ * is also used in the SDIO_ISR_THREAD branch below -- a build with
+ * SDIO_ISR_THREAD and without MMC_SDIO_ABORT would not compile; confirm
+ * the two options are always enabled together.
+ */
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+	struct osl_timespec now, before;
+
+	if (sd_msglevel & SDH_COST_VAL)
+		osl_do_gettimeofday(&before);
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if(rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers. Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+#if defined (BT_OVER_SDIO)
+				do {
+				if (sd->func[3]) {
+					sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
+
+					if (*byte & SDIO_FUNC_ENABLE_3) {
+						sdio_claim_host(sd->func[3]);
+
+						/* Set Function 3 Block Size */
+						err_ret = sdio_set_block_size(sd->func[3],
+							sd_f3_blocksize);
+						if (err_ret) {
+							sd_err(("F3 blocksize set err%d\n",
+								err_ret));
+						}
+
+						/* Enable Function 3 */
+						sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
+							sd->func[3]));
+						err_ret = sdio_enable_func(sd->func[3]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
+								err_ret));
+						}
+
+						sdio_release_host(sd->func[3]);
+
+						break;
+					} else if (*byte & SDIO_FUNC_DISABLE_3) {
+						sdio_claim_host(sd->func[3]);
+
+						/* Disable Function 3 */
+						sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
+							sd->func[3]));
+						err_ret = sdio_disable_func(sd->func[3]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
+								err_ret));
+						}
+						sdio_release_host(sd->func[3]);
+						sd->func[3] = NULL;
+
+						break;
+					}
+				}
+#endif /* defined (BT_OVER_SDIO) */
+				if (sd->func[2]) {
+					sdio_claim_host(sd->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n",
+								err_ret));
+						}
+					}
+					sdio_release_host(sd->func[2]);
+				}
+#if defined (BT_OVER_SDIO)
+			} while (0);
+#endif /* defined (BT_OVER_SDIO) */
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				/* XXX Because of SDIO3.0 host issue on Manta,
+				 * sometimes the abort fails.
+				 * Retrying again will fix this issue.
+				 */
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * this sdio_f0_writeb() can be replaced with
+						 * another api depending upon MMC driver change.
+						 * As of this time, this is temporaray one
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif /* MMC_SDIO_ABORT */
+#if defined(SDIO_ISR_THREAD)
+			else if (regaddr == SDIOD_CCCR_INTR_EXTN) {
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * this sdio_f0_writeb() can be replaced with
+						 * another api depending upon MMC driver change.
+						 * As of this time, this is temporaray one
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				if (sd->func[func]) {
+					sdio_claim_host(sd->func[func]);
+					sdio_f0_writeb(sd->func[func],
+						*byte, regaddr, &err_ret);
+					sdio_release_host(sd->func[func]);
+				}
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			if (sd->func[func]) {
+				sdio_claim_host(sd->func[func]);
+				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(sd->func[func]);
+			}
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		if (sd->func[func]) {
+			sdio_claim_host(sd->func[func]);
+			if (func == 0) {
+				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
+			} else {
+				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
+			}
+			sdio_release_host(sd->func[func]);
+		}
+	}
+
+	if (err_ret) {
+		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ)
+			|| (err_ret == -EIO))) {
+			/* XXX: Read/Write to SBSDIO_FUNC1_SLEEPCSR could return -110(timeout)
+			 * or -84(CRC) error in case the host tries to wake the device up.
+			 * Skip error log message if err code is -110 or -84 when accessing
+			 * to SBSDIO_FUNC1_SLEEPCSR to avoid QA misunderstand and DHD shoul
+			 * print error log message if retry count over the MAX_KSO_ATTEMPTS.
+			 */
+		} else {
+			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+		}
+	}
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d len=1 cost = %3dms %3dus\n", __FUNCTION__,
+			rw, diff_us/1000, diff_us%1000));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+/*
+ * Select the TX-glom mode. Only the two known modes are accepted; any other
+ * value leaves the current mode untouched. Returns the mode now in effect.
+ */
+uint
+sdioh_set_mode(sdioh_info_t *sd, uint mode)
+{
+	switch (mode) {
+	case SDPCM_TXGLOM_CPY:
+	case SDPCM_TXGLOM_MDESC:
+		sd->txglom_mode = mode;
+		break;
+	default:
+		break;
+	}
+
+	return (sd->txglom_mode);
+}
+
+#ifdef PKT_STATICS
+/* Accumulated SDIO bus time in microseconds (packet-statistics builds). */
+uint32
+sdioh_get_spend_time(sdioh_info_t *sd)
+{
+	return (sd->sdio_spent_time_us);
+}
+#endif
+
+/*
+ * 2- or 4-byte register access on F1/F2 (F0 is rejected: only CMD52 byte
+ * access is allowed there). On error, issues a CMD52 abort of the function
+ * through F0, retrying up to MMC_SDIO_ABORT_RETRY_LIMIT times.
+ * NOTE(review): err_ret starts as SDIOH_API_RC_FAIL (non-zero), so if an
+ * invalid nbytes skips the transfer the call still reports failure --
+ * presumably intentional, but confirm the sentinel value.
+ */
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+	uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+	int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc in dead lock
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+	struct osl_timespec now, before;
+
+	if (sd_msglevel & SDH_COST_VAL)
+		osl_do_gettimeofday(&before);
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+		__FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(sd->func[func]);
+
+	if(rw) { /* CMD52 Write */
+		if (nbytes == 4) {
+			sdio_writel(sd->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD52 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(sd->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret) {
+#if defined(MMC_SDIO_ABORT)
+		/* Any error on CMD53 transaction should abort that function using function 0. */
+		while (sdio_abort_retry--) {
+			if (sd->func[0]) {
+				sdio_claim_host(sd->func[0]);
+				/*
+				 * this sdio_f0_writeb() can be replaced with another api
+				 * depending upon MMC driver change.
+				 * As of this time, this is temporaray one
+				 */
+				sdio_writeb(sd->func[0],
+					func, SDIOD_CCCR_IOABORT, &err_ret2);
+				sdio_release_host(sd->func[0]);
+			}
+			if (!err_ret2)
+				break;
+		}
+		if (err_ret)
+#endif /* MMC_SDIO_ABORT */
+		{
+			sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n",
+				rw ? "Write" : "Read", func, addr, *word, err_ret));
+		}
+	}
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d, len=%d cost = %3dms %3dus\n", __FUNCTION__,
+			rw, nbytes, diff_us/1000, diff_us%1000));
+	}
+
+	return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+#ifdef BCMSDIOH_TXGLOM
+/* Transfer a glommed (chained) packet to/from SDIO function 'func'.
+ *
+ * Two glom modes are supported, chosen by sd->txglom_mode:
+ *  - SDPCM_TXGLOM_MDESC: build a scatter-gather list over the packet
+ *    chain and issue raw CMD53 (SD_IO_RW_EXTENDED) requests directly to
+ *    the MMC core; the chain is split into several block-aligned CMD53s
+ *    when it exceeds the host's max request size or segment limits.
+ *  - SDPCM_TXGLOM_CPY: copy the whole chain into one contiguous bounce
+ *    buffer and move it with the standard sdio_memcpy/readsb helpers.
+ *
+ * fix_inc == SDIOH_DATA_FIX selects fixed-address (FIFO) access; for
+ * incrementing mode the target address is advanced after each transfer.
+ * Returns SDIOH_API_RC_SUCCESS or SDIOH_API_RC_FAIL.
+ */
+static SDIOH_API_RC
+sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+	uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	void *pnext;
+	uint ttl_len, pkt_offset;
+	uint blk_num;
+	uint blk_size;
+	uint max_blk_count;
+	uint max_req_size;
+	struct mmc_request mmc_req;
+	struct mmc_command mmc_cmd;
+	struct mmc_data mmc_dat;
+	uint32 sg_count;
+	struct sdio_func *sdio_func = sd->func[func];
+	struct mmc_host *host = sdio_func->card->host;
+	uint8 *localbuf = NULL;		/* bounce buffer, CPY mode only */
+	uint local_plen = 0;
+	uint pkt_len = 0;
+	struct osl_timespec now, before;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	/* Timing is always captured when PKT_STATICS is on (needed for
+	 * sdioh_get_spend_time); otherwise only when SDH_COST_VAL is set.
+	 */
+#ifndef PKT_STATICS
+	if (sd_msglevel & SDH_COST_VAL)
+#endif
+		osl_do_gettimeofday(&before);
+
+	blk_size = sd->client_block_size[func];
+	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
+	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
+
+	pkt_offset = 0;
+	pnext = pkt;
+
+	ttl_len = 0;
+	sg_count = 0;
+	if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
+		/* One iteration of the outer loop = one CMD53 request. */
+		while (pnext != NULL) {
+			ttl_len = 0;
+			sg_count = 0;
+			memset(&mmc_req, 0, sizeof(struct mmc_request));
+			memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+			memset(&mmc_dat, 0, sizeof(struct mmc_data));
+			sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
+
+			/* Set up scatter-gather DMA descriptors. this loop is to find out the max
+			 * data we can transfer with one command 53. blocks per command is limited by
+			 * host max_req_size and 9-bit max block number. when the total length of this
+			 * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED
+			 * commands (each transfer is still block aligned)
+			 */
+			while (pnext != NULL && ttl_len < max_req_size) {
+				int pkt_len;
+				int sg_data_size;
+				uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
+
+				ASSERT(pdata != NULL);
+				pkt_len = PKTLEN(sd->osh, pnext);
+				sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
+				/* sg_count is unlikely larger than the array size, and this is
+				 * NOT something we can handle here, but in case it happens, PLEASE put
+				 * a restriction on max tx/glom count (based on host->max_segs).
+				 */
+				if (sg_count >= ARRAYSIZE(sd->sg_list)) {
+					/* No resources held here: localbuf is only used in CPY mode. */
+					sd_err(("%s: sg list entries(%u) exceed limit(%zu),"
+						" sd blk_size=%u\n",
+						__FUNCTION__, sg_count, (size_t)ARRAYSIZE(sd->sg_list), blk_size));
+					return (SDIOH_API_RC_FAIL);
+				}
+				pdata += pkt_offset;
+
+				sg_data_size = pkt_len - pkt_offset;
+				if (sg_data_size > max_req_size - ttl_len)
+					sg_data_size = max_req_size - ttl_len;
+				/* some platforms put a restriction on the data size of each scatter-gather
+				 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
+				 * max_seg_size
+				 */
+				if (sg_data_size > host->max_seg_size) {
+					sg_data_size = host->max_seg_size;
+				}
+				sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
+
+				ttl_len += sg_data_size;
+				pkt_offset += sg_data_size;
+				if (pkt_offset == pkt_len) {
+					/* Current packet fully consumed; move to the next. */
+					pnext = PKTNEXT(sd->osh, pnext);
+					pkt_offset = 0;
+				}
+			}
+
+			if (ttl_len % blk_size != 0) {
+				sd_err(("%s, data length %d not aligned to block size %d\n",
+					__FUNCTION__, ttl_len, blk_size));
+				return SDIOH_API_RC_FAIL;
+			}
+			blk_num = ttl_len / blk_size;
+			mmc_dat.sg = sd->sg_list;
+			mmc_dat.sg_len = sg_count;
+			mmc_dat.blksz = blk_size;
+			mmc_dat.blocks = blk_num;
+			mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+			/* Hand-built CMD53 argument, see SDIO spec SD_IO_RW_EXTENDED:
+			 * [31] R/W, [30:28] function, [27] block mode, [26] OP code
+			 * (incrementing address), [25:9] register address, [8:0] count.
+			 */
+			mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+			mmc_cmd.arg = write ? 1<<31 : 0;
+			mmc_cmd.arg |= (func & 0x7) << 28;
+			mmc_cmd.arg |= 1<<27;
+			mmc_cmd.arg |= fifo ? 0 : 1<<26;
+			mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+			mmc_cmd.arg |= blk_num & 0x1FF;
+			mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+			mmc_req.cmd = &mmc_cmd;
+			mmc_req.data = &mmc_dat;
+			if (!fifo)
+				addr += ttl_len;
+
+			sdio_claim_host(sdio_func);
+			mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+			mmc_wait_for_req(host, &mmc_req);
+			sdio_release_host(sdio_func);
+
+			err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
+			if (0 != err_ret) {
+				sd_err(("%s:CMD53 %s failed with code %d\n",
+					__FUNCTION__, write ? "write" : "read", err_ret));
+				return SDIOH_API_RC_FAIL;
+			}
+		}
+	} else if(sd->txglom_mode == SDPCM_TXGLOM_CPY) {
+		/* Copy mode: first measure the chain, then gather it into one
+		 * contiguous bounce buffer and do a single transfer.
+		 */
+		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+			ttl_len += PKTLEN(sd->osh, pnext);
+		}
+		/* Claim host controller */
+		sdio_claim_host(sd->func[func]);
+		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+			uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext);
+			pkt_len = PKTLEN(sd->osh, pnext);
+
+			if (!localbuf) {
+				localbuf = (uint8 *)MALLOC(sd->osh, ttl_len);
+				if (localbuf == NULL) {
+					sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
+						__FUNCTION__, (write) ? "TX" : "RX"));
+					/* Best effort: fall through and transfer just this
+					 * packet directly instead of the whole glom.
+					 */
+					goto txglomfail;
+				}
+			}
+
+			bcopy(buf, (localbuf + local_plen), pkt_len);
+			local_plen += pkt_len;
+			/* Keep accumulating until the last packet of the chain. */
+			if (PKTNEXT(sd->osh, pnext))
+				continue;
+
+			buf = localbuf;
+			pkt_len = local_plen;
+txglomfail:
+			/* Align Patch */
+			if (!write || pkt_len < 32)
+				pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
+			else if (pkt_len % blk_size)
+				pkt_len += blk_size - (pkt_len % blk_size);
+
+			/* NOTE(review): both write branches use sdio_memcpy_toio, so
+			 * fixed-address (FIFO) writes also increment — matches the
+			 * non-glom path's behavior; verify against sdio_writesb.
+			 */
+			if ((write) && (!fifo))
+				err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, pkt_len);
+			else if (write)
+				err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, pkt_len);
+			else if (fifo)
+				err_ret = sdio_readsb(sd->func[func], buf, addr, pkt_len);
+			else
+				err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, pkt_len);
+
+			if (err_ret)
+				sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
+					__FUNCTION__,
+					(write) ? "TX" : "RX",
+					pnext, sg_count, addr, pkt_len, err_ret));
+			else
+				sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+					__FUNCTION__,
+					(write) ? "TX" : "RX",
+					pnext, sg_count, addr, pkt_len));
+
+			if (!fifo)
+				addr += pkt_len;
+			sg_count ++;
+		}
+		sdio_release_host(sd->func[func]);
+	} else {
+		sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (localbuf)
+		MFREE(sd->osh, localbuf, ttl_len);
+
+#ifndef PKT_STATICS
+	if (sd_msglevel & SDH_COST_VAL)
+#endif
+	{
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d, ttl_len=%4d cost = %3dms %3dus\n", __FUNCTION__,
+			write, ttl_len, diff_us/1000, diff_us%1000));
+#ifdef PKT_STATICS
+		/* Record the cost of F2 glom writes for sdioh_get_spend_time(). */
+		if (write && (func == 2))
+			sd->sdio_spent_time_us = diff_us;
+#endif
+	}
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* BCMSDIOH_TXGLOM */
+
+/* Transfer 'len' bytes between 'buf' and function 'func' at 'addr'.
+ *
+ * write:   non-zero = host->card, zero = card->host.
+ * fix_inc: SDIOH_DATA_FIX selects fixed-address (FIFO) access.
+ *
+ * Fix: the original had two identical write branches
+ * ("write && !fifo" and "write" both called sdio_memcpy_toio), so the
+ * second was dead code; they are collapsed into one. Behavior is
+ * unchanged: FIFO writes also go through sdio_memcpy_toio (incrementing
+ * address) — NOTE(review): sdio_writesb would be the fixed-address
+ * variant, confirm against the original driver's intent.
+ *
+ * Returns SDIOH_API_RC_SUCCESS or SDIOH_API_RC_FAIL.
+ */
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+	uint addr, uint8 *buf, uint len)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	struct osl_timespec now, before;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(buf);
+
+	if (sd_msglevel & SDH_COST_VAL)
+		osl_do_gettimeofday(&before);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4)
+	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
+	 * is aligned to block boundary. If you want to align each packet to
+	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
+	 *
+	 * For reads, the alignment is done in sdioh_request_buffer.
+	 *
+	 */
+	sdio_claim_host(sd->func[func]);
+
+	if (write)
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (fifo)
+		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+	else
+		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
+
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret)
+		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len, err_ret));
+	else
+		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len));
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d, len=%4d cost = %3dms %3dus\n", __FUNCTION__,
+			write, len, diff_us/1000, diff_us%1000));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ * Dispatch order:
+ *  1. packet chain  -> sdioh_request_packet_chain (glom path)
+ *  2. DMA-aligned, non-vmalloc buffer -> direct sdioh_buffer_tofrom_bus
+ *  3. otherwise     -> bounce through a freshly allocated aligned packet
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC status;
+	void *tmppkt;
+	int is_vmalloc = FALSE;	/* vmalloc memory is not DMA-able; force a copy */
+	struct osl_timespec now, before;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	if (sd_msglevel & SDH_COST_VAL)
+		osl_do_gettimeofday(&before);
+
+	if (pkt) {
+#ifdef BCMSDIOH_TXGLOM
+		/* packet chain, only used for tx/rx glom, all packets length
+		 * are aligned, total length is a block multiple
+		 */
+		if (PKTNEXT(sd->osh, pkt))
+			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
+#endif /* BCMSDIOH_TXGLOM */
+		/* non-glom mode, ignore the buffer parameter and use the packet pointer
+		 * (this shouldn't happen)
+		 */
+		buffer = PKTDATA(sd->osh, pkt);
+		buf_len = PKTLEN(sd->osh, pkt);
+	}
+
+	ASSERT(buffer);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24)
+	is_vmalloc = is_vmalloc_addr(buffer);
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24) */
+
+	/* buffer and length are aligned, use it directly so we can avoid memory copy */
+	if ((((ulong)buffer & DMA_ALIGN_MASK) == 0) && ((buf_len & DMA_ALIGN_MASK) == 0) &&
+			(!is_vmalloc)) {
+		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
+	}
+
+	if (is_vmalloc) {
+		sd_trace(("%s: Need to memory copy due to virtual memory address.\n",
+			__FUNCTION__));
+	}
+
+	sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
+		__FUNCTION__, write, buffer, buf_len));
+
+	/* otherwise, a memory copy is needed as the input buffer is not aligned */
+	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
+	if (tmppkt == NULL) {
+		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (write)
+		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
+
+	/* Transfer the (rounded-up) aligned copy, then copy back on reads. */
+	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
+		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
+
+	if (!write)
+		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
+
+	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d, len=%d cost = %3dms %3dus\n", __FUNCTION__,
+			write, buf_len, diff_us/1000, diff_us%1000));
+	}
+
+	return status;
+}
+
+/* this function performs "abort" for both of host & device */
+/* Aborts any in-progress transfer on function 'func' by writing the
+ * function number to the CCCR IOABORT register through F0 (CMD52).
+ * No-op when MMC_SDIO_ABORT is not compiled in.
+ */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+	/* XXX Standard Linux SDIO Stack cannot perform an abort. */
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+/* Stub on the Linux MMC stack: the core driver handles re-enumeration,
+ * so this always reports success without touching the hardware.
+ */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+/* Clears the client-interrupt bit in the software interrupt mask only;
+ * does not touch the hardware interrupt enable.
+ */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+/* Sets the client-interrupt bit in the software interrupt mask only;
+ * does not touch the hardware interrupt enable.
+ */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+/* Reads a 'regsize'-byte register at 'regaddr' on function 'func' into
+ * *data. F0 accesses and single-byte reads use a CMD52 byte access;
+ * wider reads go through sdioh_request_word. The result is masked to
+ * the register width.
+ *
+ * Fix: propagate a CMD52 byte-read failure instead of silently
+ * returning SUCCESS — the word path already checked its result.
+ * Also dropped the redundant "*data &= 0xff" (the uint8 source is
+ * already limited to one byte).
+ *
+ * Returns SUCCESS or BCME_SDIO_ERROR.
+ */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		if (sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp)) {
+			return BCME_SDIO_ERROR;
+		}
+		*data = temp;
+		sd_data(("%s: byte read data=0x%02x\n",
+		         __FUNCTION__, *data));
+	} else {
+		if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
+			return BCME_SDIO_ERROR;
+		}
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+		         __FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* Serialize local claim-host usage against the IRQ handler.
+ * NULL-safe; compiled to a no-op on pre-2.6.25 kernels.
+ */
+void sdio_claim_host_lock_local(sdioh_info_t *sd) // terence 20140926: fix for claim host issue
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (sd == NULL)
+		return;
+	mutex_lock(&sd->claim_host_mutex);
+#endif
+}
+
+/* Release the local claim-host serialization mutex.
+ * NULL-safe; compiled to a no-op on pre-2.6.25 kernels.
+ */
+void sdio_claim_host_unlock_local(sdioh_info_t *sd) // terence 20140926: fix for claim host issue
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (sd == NULL)
+		return;
+	mutex_unlock(&sd->claim_host_mutex);
+#endif
+}
+
+/* bcmsdh_sdmmc interrupt handler */
+/* In-band SDIO interrupt handler registered on F1. Runs with the host
+ * claimed by the MMC core; the host is temporarily released so the DHD
+ * interrupt handler can issue its own claimed accesses, then re-claimed
+ * before returning. The local mutex keeps this from racing with
+ * sdioh_stop()'s irq teardown.
+ *
+ * Fix: corrected the "muxtex" typo in the bail-out log message.
+ */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd = sdio_get_drvdata(func);
+
+	ASSERT(sd != NULL);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	/* Teardown in progress elsewhere; skip this interrupt. */
+	if (mutex_is_locked(&sd->claim_host_mutex)) {
+		printf("%s: mutex is locked and return\n", __FUNCTION__);
+		return;
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+
+	sdio_claim_host_lock_local(sd);
+	sdio_release_host(sd->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else { /* XXX - Do not remove these sd_err messages. Need to figure
+		    out how to keep interrupts disabled until DHD registers
+		    a handler.
+		  */
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+			__FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(sd->func[0]);
+	sdio_claim_host_unlock_local(sd);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+/* F2 interrupts are serviced via the F1 handler; this exists only so an
+ * irq can be claimed on F2.
+ */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg (currently compiled out under NOTUSED).
+ *
+ * Writes 'data' to 'regaddr' on function 'func'; width from 'regsize'
+ * (1 byte via CMD52, otherwise a word access).
+ *
+ * Fix: the original passed SDIOH_READ as the rw argument in both paths,
+ * which would turn this "write" helper into reads if it were ever
+ * compiled in. Use SDIOH_WRITE so the direction matches the intent.
+ */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+		         __FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+		         __FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
+/* Software-reset the SDIO card so it can be re-initialized after a
+ * power cycle. Uses mmc_sw_reset() on >= 4.18 kernels (when MMC_SW_RESET
+ * is available); falls back to the legacy sdio_reset_comm() otherwise.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int sdio_sw_reset(sdioh_info_t *sd)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET)
+	struct mmc_host *host = sd->func[0]->card->host;
+#endif
+	int err = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET)
+	printf("%s: Enter\n", __FUNCTION__);
+	sdio_claim_host(sd->func[0]);
+	err = mmc_sw_reset(host);
+	sdio_release_host(sd->func[0]);
+#else
+	err = sdio_reset_comm(sd->func[0]->card);
+#endif
+
+	if (err)
+		sd_err(("%s Failed, error = %d\n", __FUNCTION__, err));
+
+	return err;
+}
+#endif
+
+/* Bring the bus back up after a power cycle, in two stages.
+ *
+ * stage 0: reset the card, restore block sizes for F1/F2 and re-enable
+ *          the functions (firmware download happens between stages).
+ * stage 1: (re)arm interrupts — in-band irqs on F1/F2, or the OOB GPIO
+ *          interrupt when OOB_INTR_ONLY is configured.
+ *
+ * Always returns 0; errors are logged (except a failed sw reset, which
+ * is returned). Entire body compiled only for OEM_ANDROID.
+ */
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+#if defined(OEM_ANDROID)
+	int ret;
+
+	if (!sd) {
+		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+		return (0);
+	}
+
+	/* Need to do this stages as we can't enable the interrupt till
+	   downloading of the firmware is complete, otherwise polling
+	   sdio access will come in way
+	 */
+	if (sd->func[0]) {
+		if (stage == 0) {
+			/* Since the power to the chip is killed, we will have
+			   re enumerate the device again. Set the block size
+			   and enable function 1 in preparation for
+			   downloading the code
+			 */
+			/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
+			   2.6.27. The implementation prior to that is buggy, and needs broadcom's
+			   patch for it
+			 */
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
+			if ((ret = sdio_sw_reset(sd))) {
+				sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+				return ret;
+			} else
+#endif
+			{
+				sd->num_funcs = 2;
+				sd->sd_blockmode = TRUE;
+				sd->use_client_ints = TRUE;
+				sd->client_block_size[0] = 64;
+
+				if (sd->func[1]) {
+					/* Claim host controller */
+					sdio_claim_host(sd->func[1]);
+
+					sd->client_block_size[1] = 64;
+					ret = sdio_set_block_size(sd->func[1], 64);
+					if (ret) {
+						sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+							"blocksize(%d)\n", ret));
+					}
+
+					/* Release host controller F1 */
+					sdio_release_host(sd->func[1]);
+				}
+
+				if (sd->func[2]) {
+					/* Claim host controller F2 */
+					sdio_claim_host(sd->func[2]);
+
+					sd->client_block_size[2] = sd_f2_blocksize;
+					printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
+					ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+					if (ret) {
+						sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+							"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
+					}
+
+					/* Release host controller F2 */
+					sdio_release_host(sd->func[2]);
+				}
+
+				sdioh_sdmmc_card_enablefuncs(sd);
+			}
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(sd->func[0]);
+			if (sd->func[2])
+				sdio_claim_irq(sd->func[2], IRQHandlerF2);
+			if (sd->func[1])
+				sdio_claim_irq(sd->func[1], IRQHandler);
+			sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr(sd);
+#endif
+			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+#endif /* defined(OEM_ANDROID) */
+
+	return (0);
+}
+
+/* Counterpart of sdioh_start(): release the in-band irqs (or disable the
+ * OOB interrupt) before the device is powered down, so the MMC stack
+ * stops polling a dead card.
+ *
+ * Fix: guard against a NULL 'sd' handle, consistent with sdioh_start();
+ * the original dereferenced sd->func[0] unconditionally.
+ *
+ * Always returns 0.
+ */
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	if (!sd) {
+		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+		return (0);
+	}
+#if defined(OEM_ANDROID)
+	/* MSM7201A Android sdio stack has bug with interrupt
+	   So internaly within SDIO stack they are polling
+	   which cause issue when device is turned off. So
+	   unregister interrupt with SDIO stack to stop the
+	   polling
+	 */
+#if !defined(OOB_INTR_ONLY)
+	sdio_claim_host_lock_local(sd);
+#endif
+	if (sd->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(sd->func[0]);
+		if (sd->func[1])
+			sdio_release_irq(sd->func[1]);
+		if (sd->func[2])
+			sdio_release_irq(sd->func[2]);
+		sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr(sd);
+#endif
+		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+#endif /* defined(OEM_ANDROID) */
+#if !defined(OOB_INTR_ONLY)
+	sdio_claim_host_unlock_local(sd);
+#endif
+	return (0);
+}
+
+/* Lock-free wait is not needed on the Linux MMC stack; always reports
+ * "lock free" (1).
+ */
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return (1);
+}
+
+#ifdef BCMINTERNAL
+/* Diagnostic hook for internal builds; no-op here, returns success (0). */
+extern SDIOH_API_RC
+sdioh_test_diag(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return (0);
+}
+#endif /* BCMINTERNAL */
+
+/* GPIO output-enable: not supported on this bus layer; always fails. */
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+/* GPIO output write: not supported on this bus layer; always fails. */
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+/* GPIO input read: not supported on this bus layer; always reads low. */
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+/* GPIO init: not supported on this bus layer; always fails. */
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+/* Return the host controller's current SD clock rate in Hz, or 0 on
+ * kernels where mmc_host_clk_rate() is unavailable (3.3 <= v < 4.4).
+ * NOTE(review): mmc_host_clk_rate() was removed from mainline again
+ * later; verify it exists for the target kernel.
+ */
+uint
+sdmmc_get_clock_rate(sdioh_info_t *sd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	struct sdio_func *sdio_func = sd->func[0];
+	struct mmc_host *host = sdio_func->card->host;
+	return mmc_host_clk_rate(host);
+#else
+	return 0;
+#endif
+}
+
+/* Set the host controller's SD clock to 'hz', clamped to the host's
+ * [f_min, f_max] range, by rewriting the ios directly. No-op on kernels
+ * without the mmc_host_clk_* API (3.3 <= v < 4.4).
+ */
+void
+sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	struct sdio_func *sdio_func = sd->func[0];
+	struct mmc_host *host = sdio_func->card->host;
+	struct mmc_ios *ios = &host->ios;
+
+	mmc_host_clk_hold(host);
+	DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
+	if (hz < host->f_min) {
+		DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
+		hz = host->f_min;
+	}
+
+	if (hz > host->f_max) {
+		DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
+		hz = host->f_max;
+	}
+	ios->clock = hz;
+	host->ops->set_ios(host, ios);
+	DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
+	mmc_host_clk_release(host);
+#else
+	return;
+#endif
+}
+
+/* Set the SD clock to sd->sd_clk_rate / sd_div.
+ *
+ * Fix: the original compared the current clock *rate* (Hz) against the
+ * requested *divisor* ("old_div == sd_div"), so the early-out almost
+ * never triggered; compare against the computed target rate instead.
+ * Also guards against a zero divisor.
+ */
+void
+sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
+{
+	uint hz;
+	uint cur_rate = sdmmc_get_clock_rate(sd);
+
+	if (sd_div == 0) {
+		sd_err(("%s: invalid divisor 0\n", __FUNCTION__));
+		return;
+	}
+
+	hz = sd->sd_clk_rate / sd_div;
+	/* Already running at the target rate: nothing to do. */
+	if (cur_rate == hz) {
+		return;
+	}
+	sdmmc_set_clock_rate(sd, hz);
+}
diff --git a/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c b/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c
new file mode 100755
index 0000000..1dfb408
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,388 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <linux/sched.h> /* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <dhd_linux.h>
+#include <bcmsdh_sdmmc.h>
+#include <dhd_dbg.h>
+#include <bcmdevs.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
+
+extern void wl_cfg80211_set_parent_dev(void *dev);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+ uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+#ifdef GLOBAL_SDMMC_INSTANCE
+PBCMSDH_SDMMC_INSTANCE gInstance;
+#endif
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern volatile bool dhd_mmc_suspend;
+
+/* Attach the bcmsdh layer to an enumerated SDIO function.
+ *
+ * Looks up the platform adapter by (host index, rca), records the bus
+ * coordinates on it, then builds the OSL handle, the sdioh host state
+ * and the bcmsdh instance, chaining them via sdio_set_drvdata().
+ * Returns 0 on success, -ENOMEM on any attach failure (partially built
+ * state is torn down).
+ */
+static int sdioh_probe(struct sdio_func *func)
+{
+	int host_idx = func->card->host->index;
+	uint32 rca = func->card->rca;
+	wifi_adapter_info_t *adapter;
+	osl_t *osh = NULL;
+	sdioh_info_t *sdioh = NULL;
+
+	sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca));
+	adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca);
+	if (adapter != NULL) {
+		sd_err(("found adapter info '%s'\n", adapter->name));
+		adapter->bus_type = SDIO_BUS;
+		adapter->bus_num = host_idx;
+		adapter->slot_num = rca;
+		adapter->sdio_func = func;
+	} else
+		/* Not fatal: attach continues with a NULL adapter. */
+		sd_err(("can't find adapter info for this chip\n"));
+
+#ifdef WL_CFG80211
+	wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+
+	/* allocate SDIO Host Controller state info */
+	osh = osl_attach(&func->dev, SDIO_BUS, TRUE);
+	if (osh == NULL) {
+		sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+	osl_static_mem_init(osh, adapter);
+	sdioh = sdioh_attach(osh, func);
+	if (sdioh == NULL) {
+		sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+	sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca);
+	if (sdioh->bcmsdh == NULL) {
+		sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	sdio_set_drvdata(func, sdioh);
+	return 0;
+
+fail:
+	/* Unwind whatever was built before the failure. */
+	if (sdioh != NULL)
+		sdioh_detach(osh, sdioh);
+	if (osh != NULL)
+		osl_detach(osh);
+	return -ENOMEM;
+}
+
+/* Detach the bcmsdh layer from an SDIO function: tear down the bcmsdh
+ * instance, the sdioh host state and the OSL handle built in
+ * sdioh_probe(). Safe to call when no sdioh was attached.
+ */
+static void sdioh_remove(struct sdio_func *func)
+{
+	sdioh_info_t *sdioh;
+	osl_t *osh;
+
+	sdioh = sdio_get_drvdata(func);
+	if (sdioh == NULL) {
+		sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+		return;
+	}
+	sd_err(("%s: Enter\n", __FUNCTION__));
+
+	osh = sdioh->osh;
+	bcmsdh_remove(sdioh->bcmsdh);
+	sdioh_detach(osh, sdioh);
+	osl_detach(osh);
+}
+
+/* sdio_driver probe callback, invoked once per enumerated function.
+ * The real attach runs only for F2, or for F1 on single-function
+ * devices (device id 0x4) — e.g. 4318 has no function 2.
+ */
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	int ret = 0;
+
+	if (func == NULL)
+		return -EINVAL;
+
+	sd_err(("%s: Enter num=%d\n", __FUNCTION__, func->num));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+#ifdef GLOBAL_SDMMC_INSTANCE
+	gInstance->func[func->num] = func;
+#endif
+
+	/* 4318 doesn't have function 2 */
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+		ret = sdioh_probe(func);
+
+	return ret;
+}
+
+/* sdio_driver remove callback; mirrors bcmsdh_sdmmc_probe — detaches
+ * only for the function on which the attach was performed.
+ */
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+	if (func == NULL) {
+		sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
+		return;
+	}
+
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+		sdioh_remove(func);
+}
+
+/* devices we support, null terminated */
+/* Match table for the sdio_driver; includes the wildcard Broadcom
+ * default id, explicit chip ids, and a catch-all SDIO_CLASS_NONE entry.
+ * The all-zero terminator must stay last.
+ */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+	/* XXX This should not be in the external release, as it will attach to any SDIO
+	 * device, even non-WLAN devices.
+	 * Need to add IDs for the FALCON-based chips and put this under BCMINTERNAL
+	 { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
+	 */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM4362_CHIP_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43751_CHIP_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43752_CHIP_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43012_CHIP_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_CHIP_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N2G_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N5G_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_CHIP_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N2G_ID) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N5G_ID) },
+	{ SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
+	{ 0, 0, 0, 0 /* end: all zeroes */
+	},
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+/* dev_pm_ops suspend hook. Only F2 carries driver state; other
+ * functions return immediately. Notifies the bcmsdh layer, then asks
+ * the host to keep card power across suspend (MMC_PM_KEEP_POWER) —
+ * failing the suspend if the host cannot.
+ *
+ * Fix: guard against a NULL drvdata (F2 probe may have failed), which
+ * would have been dereferenced unconditionally.
+ */
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+	int err;
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+	mmc_pm_flag_t sdio_flags;
+
+	printf("%s Enter func->num=%d\n", __FUNCTION__, func->num);
+	if (func->num != 2)
+		return 0;
+
+	sdioh = sdio_get_drvdata(func);
+	if (sdioh == NULL) {
+		sd_err(("%s: no sdioh found on F2\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	dhd_mmc_suspend = TRUE;
+	err = bcmsdh_suspend(sdioh->bcmsdh);
+	if (err) {
+		printf("%s bcmsdh_suspend err=%d\n", __FUNCTION__, err);
+		dhd_mmc_suspend = FALSE;
+		return err;
+	}
+
+	sdio_flags = sdio_get_host_pm_caps(func);
+	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+		sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
+		dhd_mmc_suspend = FALSE;
+		return -EINVAL;
+	}
+
+	/* keep power while host suspended */
+	err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+	if (err) {
+		sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
+		dhd_mmc_suspend = FALSE;
+		return err;
+	}
+	smp_mb();
+
+	printf("%s Exit\n", __FUNCTION__);
+	return 0;
+}
+
+/* dev_pm_ops resume hook; F2 only. Clears the global suspend flag and
+ * notifies the bcmsdh layer.
+ *
+ * Fix: guard against a NULL drvdata (F2 probe may have failed), which
+ * would have been dereferenced unconditionally.
+ */
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+
+	printf("%s Enter func->num=%d\n", __FUNCTION__, func->num);
+	if (func->num != 2)
+		return 0;
+
+	dhd_mmc_suspend = FALSE;
+	sdioh = sdio_get_drvdata(func);
+	if (sdioh != NULL)
+		bcmsdh_resume(sdioh->bcmsdh);
+	else
+		sd_err(("%s: no sdioh found on F2\n", __FUNCTION__));
+
+	smp_mb();
+	printf("%s Exit\n", __FUNCTION__);
+	return 0;
+}
+
+/* PM callbacks wired into bcmsdh_sdmmc_driver.drv.pm (kernels > 2.6.39
+ * with CONFIG_PM).
+ */
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+	.suspend	= bcmsdh_sdmmc_suspend,
+	.resume		= bcmsdh_sdmmc_resume,
+};
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+
+#if defined(BCMLXSDMMC)
+static struct semaphore *notify_semaphore = NULL;
+
+/* Probe callback of the throw-away "dummy" driver: its only job is to
+ * signal (via the registered semaphore) that the card has enumerated
+ * and function 2 is visible.
+ */
+static int dummy_probe(struct sdio_func *func,
+                       const struct sdio_device_id *id)
+{
+	sd_err(("%s: enter\n", __FUNCTION__));
+	/* Only F2 (or a NULL func pointer) counts as "card is up". */
+	if (func == NULL || func->num == 2) {
+		if (notify_semaphore)
+			up(notify_semaphore);
+	}
+	return 0;
+}
+
+/* Remove callback of the "dummy" enumeration driver; nothing to free. */
+static void dummy_remove(struct sdio_func *func)
+{
+}
+
+/* Temporary driver registered by sdio_func_reg_notify() only to detect
+ * card enumeration; shares the real driver's id table.
+ */
+static struct sdio_driver dummy_sdmmc_driver = {
+	.probe		= dummy_probe,
+	.remove		= dummy_remove,
+	.name		= "dummy_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+	};
+
+/* Register the dummy driver so 'semaphore' is upped when the card's F2
+ * enumerates. Returns the sdio_register_driver() result.
+ */
+int sdio_func_reg_notify(void* semaphore)
+{
+	notify_semaphore = semaphore;
+	return sdio_register_driver(&dummy_sdmmc_driver);
+}
+
+/* Unregister the dummy enumeration driver. The short sleep lets an
+ * in-flight dummy_probe() finish before the driver goes away.
+ */
+void sdio_func_unreg_notify(void)
+{
+	OSL_SLEEP(15);
+	sdio_unregister_driver(&dummy_sdmmc_driver);
+}
+
+#endif /* defined(BCMLXSDMMC) */
+
+/* The real SDIO client driver: binds to the Broadcom ids above and
+ * carries the PM callbacks on supported kernels.
+ */
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+	.probe		= bcmsdh_sdmmc_probe,
+	.remove		= bcmsdh_sdmmc_remove,
+	.name		= "bcmsdh_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+	.drv = {
+		.pm	= &bcmsdh_sdmmc_pm_ops,
+	},
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+	};
+
+/* Per-host OS wrapper pairing the sdioh state with a spinlock.
+ * NOTE(review): not referenced in this visible chunk — presumably used
+ * by OS-layer code elsewhere in the file.
+ */
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+};
+
+/* Interrupt enable/disable */
+/* On the Linux MMC stack interrupt routing is handled by the core, so
+ * this only validates the handle and logs the request.
+ *
+ * Fix: return SDIOH_API_RC_FAIL for a NULL handle — the original
+ * returned BCME_BADARG, which is not a value of the declared
+ * SDIOH_API_RC return type.
+ */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	if (!sd)
+		return SDIOH_API_RC_FAIL;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#ifdef BCMSDH_MODULE
+/* Module entry point: register the SDIO client with the MMC stack. */
+static int __init
+bcmsdh_module_init(void)
+{
+	return sdio_function_init();
+}
+
+/* Module exit point: unregister the SDIO client from the MMC stack. */
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+	sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init — register the bcmsdh_sdmmc sdio_driver with the MMC
+ * core; probe callbacks fire as matching functions enumerate.
+ * Returns the sdio_register_driver() result.
+*/
+int bcmsdh_register_client_driver(void)
+{
+	return sdio_register_driver(&bcmsdh_sdmmc_driver);
+}
+
+/*
+ * module cleanup — unregister the bcmsdh_sdmmc sdio_driver; remove
+ * callbacks run for every bound function.
+*/
+void bcmsdh_unregister_client_driver(void)
+{
+	sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+}
diff --git a/bcmdhd.101.10.361.x/bcmsdspi.h b/bcmdhd.101.10.361.x/bcmsdspi.h
new file mode 100755
index 0000000..9a29370
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdspi.h
@@ -0,0 +1,147 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdspi.h 833013 2019-08-02 16:26:31Z jl904071 $
+ */
+#ifndef _BCM_SD_SPI_H
+#define _BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#ifdef BCMDBG
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#else
+/* non-debug build: all logging macros compile away */
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
+
+#ifdef BCMPERFSTATS
+#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0)
+#else
+#define sd_log(x)
+#endif
+
+/* Print-only assert: does NOT halt, just logs file/line on failure. */
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+/*
+ * Per-host-controller state for the SD-SPI translation layer.
+ * One instance per attached controller; allocated by sdioh_attach().
+ */
+struct sdioh_info {
+	uint cfg_bar; /* pci cfg address for bar */
+	uint32 caps; /* cached value of capabilities reg */
+	uint bar0; /* BAR0 for PCI Device */
+	osl_t *osh; /* osh handler */
+	void *controller; /* Pointer to SPI Controller's private data struct */
+
+	uint lockcount; /* nest count of sdspi_lock() calls */
+	bool client_intr_enabled; /* interrupt connnected flag */
+	bool intr_handler_valid; /* client driver interrupt handler valid */
+	sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+	void *intr_handler_arg; /* argument to call interrupt handler */
+	bool initialized; /* card initialized */
+	uint32 target_dev; /* Target device ID */
+	uint32 intmask; /* Current active interrupts */
+	void *sdos_info; /* Pointer to per-OS private data */
+
+	uint32 controller_type; /* Host controller type */
+	uint8 version; /* Host Controller Spec Compliance Version */
+	uint irq; /* Client irq */
+	uint32 intrcount; /* Client interrupts */
+	uint32 local_intrcount; /* Controller interrupts */
+	bool host_init_done; /* Controller initted */
+	bool card_init_done; /* Client SDIO interface initted */
+	bool polled_mode; /* polling for command completion */
+
+	bool sd_use_dma; /* DMA on CMD53 */
+	bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+	/* Must be on for sd_multiblock to be effective */
+	bool use_client_ints; /* If this is false, make sure to restore */
+	bool got_hcint; /* Host Controller interrupt. */
+	/* polling hack in wl_linux.c:wl_timer() */
+	int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+	int sd_mode; /* SD1/SD4/SPI */
+	int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+	uint32 data_xfer_count; /* Current register transfer size */
+	uint32 cmd53_wr_data; /* Used to pass CMD53 write data */
+	uint32 card_response; /* Used to pass back response status byte */
+	uint32 card_rsp_data; /* Used to pass back response data word */
+	uint16 card_rca; /* Current Address */
+	uint8 num_funcs; /* Supported funcs on client */
+	uint32 com_cis_ptr;
+	uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void *dma_buf;
+	ulong dma_phys;
+	int r_cnt; /* rx count */
+	int t_cnt; /* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/bcmdhd.101.10.361.x/bcmsdspi_linux.c b/bcmdhd.101.10.361.x/bcmsdspi_linux.c
new file mode 100755
index 0000000..b771682
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdspi_linux.c
@@ -0,0 +1,433 @@
+/*
+ * Broadcom SPI Host Controller Driver - Linux Per-port
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#ifdef BCMSPI_ANDROID
+#include <bcmsdh.h>
+#include <bcmspibrcm.h>
+#include <linux/spi/spi.h>
+#else
+#include <pcicfg.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <linux/sched.h> /* request_irq(), free_irq() */
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+#endif /* BCMSPI_ANDROID */
+
+#ifndef BCMSPI_ANDROID
+extern uint sd_crc;
+module_param(sd_crc, uint, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+#endif /* !BCMSPI_ANDROID */
+
+/* Per-OS private state for the SPI port: back pointer, spinlock, and (for
+ * non-Android builds) the wait queue the ISR wakes in spi_waitbits().
+ */
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+#ifndef BCMSPI_ANDROID
+	wait_queue_head_t intr_wait_queue;
+#endif /* !BCMSPI_ANDROID */
+};
+
+#ifndef BCMSPI_ANDROID
+/* True when it is safe to sleep in the current context. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt()) /* XXX Doesn't handle CONFIG_PREEMPT? */
+#endif
+
+/* Interrupt handler: dispatches client interrupts and wakes any thread
+ * blocked in spi_waitbits(). Registered via spi_register_irq() as a
+ * shared IRQ, so it must report whether the interrupt was ours.
+ */
+static irqreturn_t
+sdspi_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+	sdioh_info_t *sd;
+	struct sdos_info *sdos;
+	bool ours;
+
+	sd = (sdioh_info_t *)dev_id;
+	sd->local_intrcount++;
+
+	/* Spurious interrupt before card init: not ours. */
+	if (!sd->card_init_done) {
+		sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+		return IRQ_RETVAL(FALSE);
+	} else {
+		ours = spi_check_client_intr(sd, NULL);
+
+		/* For local interrupts, wake the waiting process */
+		if (ours && sd->got_hcint) {
+			sdos = (struct sdos_info *)sd->sdos_info;
+			wake_up_interruptible(&sdos->intr_wait_queue);
+		}
+
+		return IRQ_RETVAL(ours);
+	}
+}
+#endif /* !BCMSPI_ANDROID */
+
+#ifdef BCMSPI_ANDROID
+/* The bound SPI device; set on probe, cleared on remove, used by spi_sendrecv(). */
+static struct spi_device *gBCMSPI = NULL;
+
+extern int bcmsdh_probe(struct device *dev);
+extern int bcmsdh_remove(struct device *dev);
+
+/* SPI-core probe: configure word size, run spi_setup(), then hand the
+ * device to the generic bcmsdh probe. Returns bcmsdh_probe()'s result.
+ */
+static int bcmsdh_spi_probe(struct spi_device *spi_dev)
+{
+	int ret = 0;
+
+	gBCMSPI = spi_dev;
+
+#ifdef SPI_PIO_32BIT_RW
+	spi_dev->bits_per_word = 32;
+#else
+	spi_dev->bits_per_word = 8;
+#endif /* SPI_PIO_32BIT_RW */
+	ret = spi_setup(spi_dev);
+
+	/* NOTE(review): a spi_setup() failure is logged but probe continues anyway. */
+	if (ret) {
+		sd_err(("bcmsdh_spi_probe: spi_setup fail with %d\n", ret));
+	}
+	sd_err(("bcmsdh_spi_probe: spi_setup with %d, bits_per_word=%d\n",
+		ret, spi_dev->bits_per_word));
+	ret = bcmsdh_probe(&spi_dev->dev);
+
+	return ret;
+}
+
+/* SPI-core remove: detach bcmsdh and forget the device pointer. */
+static int bcmsdh_spi_remove(struct spi_device *spi_dev)
+{
+	int ret = 0;
+
+	ret = bcmsdh_remove(&spi_dev->dev);
+	gBCMSPI = NULL;
+
+	return ret;
+}
+
+/* Driver object bound by name ("wlan_spi") to the board's SPI slave entry. */
+static struct spi_driver bcmsdh_spi_driver = {
+	.probe = bcmsdh_spi_probe,
+	.remove = bcmsdh_spi_remove,
+	.driver = {
+		.name = "wlan_spi",
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+		},
+};
+
+/*
+ * module init: register the gSPI client driver with the Linux SPI core.
+ * Returns 0 on success, negative errno otherwise.
+*/
+int bcmsdh_register_client_driver(void)
+{
+	sd_trace(("bcmsdh_gspi: %s Enter\n", __FUNCTION__));
+
+	return spi_register_driver(&bcmsdh_spi_driver);
+}
+
+/*
+ * module cleanup: drop the gSPI client driver registration.
+*/
+void bcmsdh_unregister_client_driver(void)
+{
+	sd_trace(("%s Enter\n", __FUNCTION__));
+	spi_unregister_driver(&bcmsdh_spi_driver);
+}
+#endif /* BCMSPI_ANDROID */
+
+/* Register with Linux for interrupts. No-op (SUCCESS) on Android gSPI
+ * builds, which use OOB interrupts instead. Returns SUCCESS or ERROR.
+ */
+int
+spi_register_irq(sdioh_info_t *sd, uint irq)
+{
+#ifndef BCMSPI_ANDROID
+	sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+	if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) {
+		sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+		return ERROR;
+	}
+#endif /* !BCMSPI_ANDROID */
+	return SUCCESS;
+}
+
+/* Free Linux irq (no-op on Android gSPI builds). */
+void
+spi_free_irq(uint irq, sdioh_info_t *sd)
+{
+#ifndef BCMSPI_ANDROID
+	free_irq(irq, sd);
+#endif /* !BCMSPI_ANDROID */
+}
+
+/* Map Host controller registers */
+#ifndef BCMSPI_ANDROID
+/* Map/unmap the controller register window; thin wrappers over REG_MAP. */
+uint32 *
+spi_reg_map(osl_t *osh, uintptr addr, int size)
+{
+	return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+spi_reg_unmap(osl_t *osh, uintptr addr, int size)
+{
+	REG_UNMAP((void*)(uintptr)addr);
+}
+#endif /* !BCMSPI_ANDROID */
+
+/*
+ * Allocate and initialize the per-OS private state for this port.
+ * Returns BCME_OK, or BCME_NOMEM on allocation failure (sd->sdos_info
+ * is left NULL in that case).
+ */
+int
+spi_osinit(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info *)MALLOC(sd->osh, sizeof(struct sdos_info));
+	if (sdos == NULL) {
+		sd->sdos_info = NULL;
+		return BCME_NOMEM;
+	}
+	sd->sdos_info = (void *)sdos;
+
+	sdos->sd = sd;
+	spin_lock_init(&sdos->lock);
+#ifndef BCMSPI_ANDROID
+	init_waitqueue_head(&sdos->intr_wait_queue);
+#endif /* !BCMSPI_ANDROID */
+	return BCME_OK;
+}
+
+/* Release the per-OS private state allocated by spi_osinit(). */
+void
+spi_osfree(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+	ASSERT(sd && sd->sdos_info);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable: record the client's wish and, outside Android
+ * builds, turn the device interrupt on/off under the sdos spinlock.
+ * Fails unless both host and card init have completed, and (non-Android)
+ * unless a handler is registered before enabling.
+ */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	if (!(sd->host_init_done && sd->card_init_done)) {
+		sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+#ifndef BCMSPI_ANDROID
+	if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+		sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#endif /* !BCMSPI_ANDROID */
+
+	/* Ensure atomicity for enable/disable calls */
+	spin_lock_irqsave(&sdos->lock, flags);
+
+	sd->client_intr_enabled = enable;
+#ifndef BCMSPI_ANDROID
+	/* Do not enable while spi_lock() is held (lockcount != 0). */
+	if (enable && !sd->lockcount)
+		spi_devintr_on(sd);
+	else
+		spi_devintr_off(sd);
+#endif /* !BCMSPI_ANDROID */
+
+	spin_unlock_irqrestore(&sdos->lock, flags);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing).
+ * Non-reentrant by design: a second call while locked trips the ASSERT.
+ */
+void
+spi_lock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (sd->lockcount) {
+		sd_err(("%s: Already locked!\n", __FUNCTION__));
+		ASSERT(sd->lockcount == 0);
+	}
+#ifdef BCMSPI_ANDROID
+	/* Android builds mask the out-of-band interrupt instead. */
+	if (sd->client_intr_enabled)
+		bcmsdh_oob_intr_set(0);
+#else
+	spi_devintr_off(sd);
+#endif /* BCMSPI_ANDROID */
+	sd->lockcount++;
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Release spi_lock(); re-enables the client interrupt only when this was
+ * the outermost unlock AND the client previously asked for interrupts.
+ */
+void
+spi_unlock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+	ASSERT(sd->lockcount > 0);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+#ifdef BCMSPI_ANDROID
+		bcmsdh_oob_intr_set(1);
+#else
+		spi_devintr_on(sd);
+#endif /* BCMSPI_ANDROID */
+	}
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+#ifndef BCMSPI_ANDROID
+/* Wait for command completion: either sleep on the ISR's wait queue
+ * (yield && blockable, BCMSDYIELD builds only) or spin via spi_spinbits().
+ */
+void spi_waitbits(sdioh_info_t *sd, bool yield)
+{
+#ifndef BCMSDYIELD
+	ASSERT(!yield);
+#endif
+	sd_trace(("%s: yield %d canblock %d\n",
+	          __FUNCTION__, yield, BLOCKABLE()));
+
+	/* Clear the "interrupt happened" flag and last intrstatus */
+	sd->got_hcint = FALSE;
+
+#ifdef BCMSDYIELD
+	if (yield && BLOCKABLE()) {
+		struct sdos_info *sdos;
+		sdos = (struct sdos_info *)sd->sdos_info;
+		/* Wait for the indication, the interrupt will be masked when the ISR fires. */
+		wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+	} else
+#endif /* BCMSDYIELD */
+	{
+		spi_spinbits(sd);
+	}
+
+}
+#else /* !BCMSPI_ANDROID */
+int bcmgspi_dump = 0; /* Set to dump complete trace of all SPI bus transactions */
+
+/* Dump 'msglen' bytes of 'msg' as hex, 16 bytes per line, each line
+ * prefixed with 'pfx'. Used for tracing SPI bus transactions.
+ */
+static void
+hexdump(char *pfx, unsigned char *msg, int msglen)
+{
+	int pos;
+	char line[80];
+
+	/* 16 bytes -> 47 chars + NUL must fit after the prefix. */
+	ASSERT(strlen(pfx) + 49 <= sizeof(line));
+
+	for (pos = 0; pos < msglen; pos++) {
+		if ((pos % 16) == 0)
+			strcpy(line, pfx);	/* start a fresh output line */
+		sprintf(line + strlen(line), "%02x", msg[pos]);
+		if (((pos + 1) % 16) == 0)
+			printf("%s\n", line);	/* line complete: flush it */
+		else
+			sprintf(line + strlen(line), " ");
+	}
+
+	/* Flush a trailing partial line, if any. */
+	if ((msglen % 16) != 0)
+		printf("%s\n", line);
+}
+
+/* Send/Receive an SPI Packet: builds a two-transfer spi_message (command
+ * out, then response in) and runs it synchronously on gBCMSPI.
+ * The read/write bit is extracted from a different byte of the command
+ * depending on sd->wordlen (2 or 4) and PIO endianness config; note the
+ * braceless ifs whose single statement is selected by the #if blocks.
+ * NOTE(review): assumes msglen >= 4 and wordlen is 2 or 4 -- if wordlen
+ * were anything else, 'write' stays 0; confirm against callers.
+ */
+void
+spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen)
+{
+	int write = 0;
+	int tx_len = 0;
+	struct spi_message msg;
+	struct spi_transfer t[2];
+
+	spi_message_init(&msg);
+	memset(t, 0, 2*sizeof(struct spi_transfer));
+
+	if (sd->wordlen == 2)
+#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW))
+		write = msg_out[2] & 0x80; /* XXX bit 7: read:0, write :1 */
+#else
+		write = msg_out[1] & 0x80; /* XXX bit 7: read:0, write :1 */
+#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */
+	if (sd->wordlen == 4)
+#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW))
+		write = msg_out[0] & 0x80; /* XXX bit 7: read:0, write :1 */
+#else
+		write = msg_out[3] & 0x80; /* XXX bit 7: read:0, write :1 */
+#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */
+
+	if (bcmgspi_dump) {
+		hexdump(" OUT: ", msg_out, msglen);
+	}
+
+	/* Writes clock out everything but the 4-byte response slot;
+	 * reads clock out only the 4-byte command.
+	 */
+	tx_len = write ? msglen-4 : 4;
+
+	sd_trace(("spi_sendrecv: %s, wordlen %d, cmd : 0x%02x 0x%02x 0x%02x 0x%02x\n",
+	          write ? "WR" : "RD", sd->wordlen,
+	          msg_out[0], msg_out[1], msg_out[2], msg_out[3]));
+
+	t[0].tx_buf = (char *)&msg_out[0];
+	t[0].rx_buf = 0;
+	t[0].len = tx_len;
+
+	spi_message_add_tail(&t[0], &msg);
+
+	t[1].rx_buf = (char *)&msg_in[tx_len];
+	t[1].tx_buf = 0;
+	t[1].len = msglen-tx_len;
+
+	spi_message_add_tail(&t[1], &msg);
+	spi_sync(gBCMSPI, &msg);
+
+	if (bcmgspi_dump) {
+		hexdump(" IN : ", msg_in, msglen);
+	}
+}
+#endif /* !BCMSPI_ANDROID */
diff --git a/bcmdhd.101.10.361.x/bcmsdstd.c b/bcmdhd.101.10.361.x/bcmsdstd.c
new file mode 100755
index 0000000..b58de62
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdstd.c
@@ -0,0 +1,5406 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <siutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+#include <pcicfg.h>
+#include <bcmsdstd.h>
+/* XXX Quick NDIS hack */
+#ifdef NDIS
+#define inline __inline
+#define PCI_CFG_VID 0
+#define PCI_CFG_BAR0 0x10
+#endif
+
+#define SD_PAGE_BITS 12
+#define SD_PAGE (1 << SD_PAGE_BITS)
+#define SDSTD_MAX_TUNING_PHASE 5
+
+/*
+ * Upper GPIO 16 - 31 are available on J22
+ * J22.pin3 == gpio16, J22.pin5 == gpio17, etc.
+ * Lower GPIO 0 - 15 are available on J15 (WL_GPIO)
+ */
+#define SDH_GPIO16 16
+#define SDH_GPIO_ENABLE 0xffff
+
+#include <bcmsdstd.h>
+#include <sbsdio.h> /* SDIOH (host controller) core hardware definitions */
+
+/* Globals */
+uint sd_msglevel = SDH_ERROR_VAL;
+
+uint sd_hiok = TRUE; /* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
+uint sd_f2_blocksize = 64; /* Default blocksize */
+uint sd_f1_blocksize = BLOCK_SIZE_4318; /* Default blocksize */
+
+#define sd3_trace(x)
+
+/* sd3ClkMode: 0-SDR12 [25MHz]
+ * 1-SDR25 [50MHz]+SHS=1
+ * 2-SDR50 [100MHz]+SSDR50=1
+ * 3-SDR104 [208MHz]+SSDR104=1
+ * 4-DDR50 [50MHz]+SDDR50=1
+ */
+#define SD3CLKMODE_0_SDR12 (0)
+#define SD3CLKMODE_1_SDR25 (1)
+#define SD3CLKMODE_2_SDR50 (2)
+#define SD3CLKMODE_3_SDR104 (3)
+#define SD3CLKMODE_4_DDR50 (4)
+#define SD3CLKMODE_DISABLED (-1)
+#define SD3CLKMODE_AUTO (99)
+
+/* values for global_UHSI_Supp : Means host and card caps match. */
+#define HOST_SDR_UNSUPP (0)
+#define HOST_SDR_12_25 (1)
+#define HOST_SDR_50_104_DDR (2)
+
+/* depends-on/affects sd3_autoselect_uhsi_max.
+ * see sd3_autoselect_uhsi_max
+ */
+int sd_uhsimode = SD3CLKMODE_DISABLED;
+uint sd_tuning_period = CAP3_RETUNING_TC_OTHER;
+uint sd_delay_value = 500000;
+/* Enables host to dongle glomming. Also increases the
+ * dma buffer size. This will increase the rx throughput
+ * as there will be lesser CMD53 transactions
+ */
+#ifdef BCMSDIOH_TXGLOM
+uint sd_txglom;
+#ifdef LINUX
+module_param(sd_txglom, uint, 0);
+#endif
+#endif /* BCMSDIOH_TXGLOM */
+
+char dhd_sdiod_uhsi_ds_override[2] = {' '};
+
+#define MAX_DTS_INDEX (3)
+#define DRVSTRN_MAX_CHAR ('D')
+#define DRVSTRN_IGNORE_CHAR (' ')
+
+/* Map from drive-strength letter index (A..D) to the register encoding. */
+char DTS_vals[MAX_DTS_INDEX + 1] = {
+	0x1, /* Driver Strength Type-A */
+	0x0, /* Driver Strength Type-B */
+	0x2, /* Driver Strength Type-C */
+	0x3, /* Driver Strength Type-D */
+	};
+
+/* depends-on/affects sd_uhsimode.
+ select MAX speed automatically based on caps of host and card.
+ If this is 1, sd_uhsimode will be ignored. If the sd_uhsimode is set
+ by the user specifically, this var becomes 0. default value: 0. [XXX:TBD: for future]
+ */
+uint32 sd3_autoselect_uhsi_max = 0;
+
+#define MAX_TUNING_ITERS (40)
+/* (150+10)millisecs total time; so dividing it for per-loop */
+#define PER_TRY_TUNING_DELAY_MS (160/MAX_TUNING_ITERS)
+#define CLKTUNING_MAX_BRR_RETRIES (1000) /* 1 ms: 1000 retries with 1 us delay per loop */
+
+/* table analogous to preset value register.
+* This is bcos current HC doesn't have preset value reg support.
+* All has DrvStr as 'B' [val:0] and CLKGEN as 0.
+*/
+/* Software stand-in for the SDIO 3.0 preset value registers, indexed by
+ * speed mode (init, default, high-speed, SDR12/25/50/104, DDR50).
+ * All entries use DrvStr 'B' [0] and CLKGEN 0.
+ */
+static unsigned short presetval_sw_table[] = {
+	0x0520, /* initialization: DrvStr:'B' [0]; CLKGen:0;
+	         * SDCLKFreqSel: 520 [division: 320*2 = 640: ~400 KHz]
+	         */
+	0x0008, /* default speed:DrvStr:'B' [0]; CLKGen:0;
+	         * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
+	         */
+	0x0004, /* High speed: DrvStr:'B' [0]; CLKGen:0;
+	         * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
+	         */
+	0x0008, /* SDR12: DrvStr:'B' [0]; CLKGen:0;
+	         * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
+	         */
+	0x0004, /* SDR25: DrvStr:'B' [0]; CLKGen:0;
+	         * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
+	         */
+	0x0001, /* SDR50: DrvStr:'B' [0]; CLKGen:0;
+	         * SDCLKFreqSel: 2 [division: 1*2 = 2: ~100 MHz]
+	         */
+	0x0001, /* SDR104: DrvStr:'B' [0]; CLKGen:0;
+	         SDCLKFreqSel: 1 [no division: ~255/~208 MHz]
+	         */
+	0x0002 /* DDR50: DrvStr:'B' [0]; CLKGen:0;
+	        SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
+	        */
+};
+
+/* This is to have software overrides to the hardware. Info follows:
+ For override [1]: Preset registers: not supported
+ Voltage switch: not supported
+ Clock Tuning: not supported
+*/
+bool sd3_sw_override1 = FALSE;
+bool sd3_sw_read_magic_bytes = FALSE;
+
+#define SD3_TUNING_REQD(sd, sd_uhsimode) ((sd_uhsimode != SD3CLKMODE_DISABLED) && \
+ (sd->version == HOST_CONTR_VER_3) && \
+ ((sd_uhsimode == SD3CLKMODE_3_SDR104) || \
+ ((sd_uhsimode == SD3CLKMODE_2_SDR50) && \
+ (GFIELD(sd->caps3, CAP3_TUNING_SDR50)))))
+
+/* find next power of 2 */
+#define NEXT_POW2(n) {n--; n |= n>>1; n |= n>>2; n |= n>>4; n++;}
+
+#ifdef BCMSDYIELD
+bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */
+uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */
+bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */
+#endif
+
+/* XXX: Issues with CMD14 enter/exit sleep
+ * XXX: Temp fix for special CMD14 handling
+ */
+#define F1_SLEEPCSR_ADDR 0x1001F
+
+uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz
+ :might get changed in code for 208
+ */
+
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_3_power_save = 1; /* Default to SDIO 3.0 power save */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+uint8 sd_dma_mode = DMA_MODE_AUTO; /* Default to AUTO & program based on capability */
+
+/* XXX Base timeout counter value on 48MHz (2^20 @ 48MHz => 21845us)
+ * Could adjust by adding sd_divisor (to maintain bit count) but really
+ * need something more elaborate to do that right. Still allows xfer
+ * of about 1000 bytes at 400KHz, so constant is ok.
+ * Timeout control N produces 2^(13+N) counter.
+ */
+uint sd_toctl = 7;
+static bool trap_errs = FALSE;
+
+static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" };
+
+/* Prototypes */
+static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor);
+static uint16 sdstd_start_power(sdioh_info_t *sd, int volts_req);
+static bool sdstd_bus_width(sdioh_info_t *sd, int width);
+static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
+static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode);
+static int sdstd_card_enablefuncs(sdioh_info_t *sd);
+static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
+static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg);
+static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 *data);
+static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 data);
+static int sdstd_driver_init(sdioh_info_t *sd);
+static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
+static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+ uint32 addr, int nbytes, uint32 *data);
+static int sdstd_abort(sdioh_info_t *sd, uint func);
+static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg);
+static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
+static void sd_map_dma(sdioh_info_t * sd);
+static void sd_unmap_dma(sdioh_info_t * sd);
+static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
+static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
+static void sd_create_adma_descriptor(sdioh_info_t *sd,
+ uint32 index, uint32 addr_phys,
+ uint16 length, uint16 flags);
+static void sd_dump_adma_dscr(sdioh_info_t *sd);
+static void sdstd_dumpregs(sdioh_info_t *sd);
+
+static int sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode);
+static int sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd);
+static int sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd,
+ int sd3_requested_clkmode);
+static bool sdstd_3_get_matching_drvstrn(sdioh_info_t *sd,
+ int sd3_requested_clkmode, uint32 *drvstrn, uint16 *presetval);
+static int sdstd_3_clock_wrapper(sdioh_info_t *sd);
+static int sdstd_clock_wrapper(sdioh_info_t *sd);
+
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+static int parse_caps(uint32 caps_reg, char *buf, int len);
+static int parse_state(uint32 state_reg, char *buf, int len);
+static void cis_fetch(sdioh_info_t *sd, int func, char *data, int len);
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
+#ifdef BCMDBG
+static void print_regs(sdioh_info_t *sd);
+#endif
+
+/*
+ * Private register access routines.
+ */
+
+/* 16 bit PCI regs */
+
+/* XXX This is a hack to satisfy the -Wmissing-prototypes warning */
+extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
+/* Read a 16-bit host-controller register at offset 'reg'. */
+uint16
+sdstd_rreg16(sdioh_info_t *sd, uint reg)
+{
+
+	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+	sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
+	return data;
+}
+
+/* XXX This is a hack to satisfy the -Wmissing-prototypes warning */
+extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
+/* Write a 16-bit host-controller register at offset 'reg'. */
+void
+sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
+{
+	*(volatile uint16 *)(sd->mem_space + reg) = (uint16) data;
+	sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
+}
+
+/* Read-modify-write: OR 'val' into a 16-bit register. */
+static void
+sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
+{
+	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+	sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
+	data |= val;
+	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+
+}
+/* Read-modify-write: replace the bits selected by 'mask' with 'val'.
+ * NOTE(review): 'mask' is declared int16 (signed), unlike the uint16 used
+ * elsewhere -- ~mask then sign-extends through int promotion; harmless for
+ * the low 16 bits but worth confirming against callers.
+ */
+static void
+sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
+{
+
+	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+	sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
+	data &= ~mask;
+	data |= (val & mask);
+	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+}
+
+/* 32 bit PCI regs */
+/* Read a 32-bit host-controller register at offset 'reg'. */
+static uint32
+sdstd_rreg(sdioh_info_t *sd, uint reg)
+{
+	volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
+	sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
+	return data;
+}
+/* Write a 32-bit host-controller register at offset 'reg'. */
+static inline void
+sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
+{
+	*(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
+	sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
+
+}
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+/* 32-bit read-modify-write helpers; compiled out in production builds. */
+static void
+sdstd_or_reg(sdioh_info_t *sd, uint reg, uint32 val)
+{
+	volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
+	data |= val;
+	*(volatile uint32 *)(sd->mem_space + reg) = (volatile uint32)data;
+}
+static void
+sdstd_mod_reg(sdioh_info_t *sd, uint reg, uint32 mask, uint32 val)
+{
+	volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
+	data &= ~mask;
+	data |= (val & mask);
+	*(volatile uint32 *)(sd->mem_space + reg) = (volatile uint32)data;
+}
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
+/* 8 bit PCI regs */
+/* Write an 8-bit host-controller register at offset 'reg'. */
+static inline void
+sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data)
+{
+	*(volatile uint8 *)(sd->mem_space + reg) = (uint8)data;
+	sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data));
+}
+/* Read an 8-bit host-controller register at offset 'reg'. */
+static uint8
+sdstd_rreg8(sdioh_info_t *sd, uint reg)
+{
+	volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg);
+	sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data));
+	return data;
+}
+
+/*
+ * Private work routines
+ */
+
+sdioh_info_t *glob_sd;
+
+/*
+ * Public entry points & extern's
+ */
+/* Allocate and bring up a 'standard' SDIO host controller instance:
+ * allocate state, map BAR0 registers, init DMA bookkeeping, run the
+ * driver init sequence (retried once to recover a stale RCA), map DMA
+ * buffers, and hook the IRQ. Returns the handle, or NULL on any failure
+ * (all partially-acquired resources are released on the error paths).
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	glob_sd = sd;
+	sd->osh = osh;
+	if (sdstd_osinit(sd) != 0) {
+		sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+	sd->mem_space = (volatile char *)sdstd_reg_map(osh, (ulong)bar0, SDIOH_REG_WINSZ);
+	sd_init_dma(sd);
+	sd->irq = irq;
+	if (sd->mem_space == NULL) {
+		sd_err(("%s:ioremap() failed\n", __FUNCTION__));
+		sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+	sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->sd_dma_mode = sd_dma_mode;
+
+	/* XXX Haven't figured out how to make bytemode work with dma */
+	if (!sd->sd_blockmode)
+		sd->sd_dma_mode = DMA_MODE_NONE;
+
+	if (sdstd_driver_init(sd) != SUCCESS) {
+		/* If host CPU was reset without resetting SD bus or
+		   SD device, the device will still have its RCA but
+		   driver no longer knows what it is (since driver has been restarted).
+		   go through once to clear the RCA and a gain reassign it.
+		 */
+		sd_info(("driver_init failed - Reset RCA and try again\n"));
+		if (sdstd_driver_init(sd) != SUCCESS) {
+			sd_err(("%s:driver_init() failed()\n", __FUNCTION__));
+			if (sd->mem_space) {
+				sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+				sd->mem_space = NULL;
+			}
+			sdstd_osfree(sd);
+			MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+			return (NULL);
+		}
+	}
+
+	/* XXX Needed for NDIS as its OSL checks for correct dma address width
+	 * This value is normally set by wlc_attach() which has yet to run
+	 */
+	OSL_DMADDRWIDTH(osh, 32);
+
+	/* Always map DMA buffers, so we can switch between DMA modes. */
+	sd_map_dma(sd);
+
+	if (sdstd_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		sdstd_free_irq(sd->irq, sd);
+		if (sd->mem_space) {
+			sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+			sd->mem_space = NULL;
+		}
+
+		sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+}
+
+/* Tear down a host-controller instance created by sdioh_attach():
+ * unmap DMA, mask interrupts, clean up SDIO 3.0 tuning state, free the
+ * IRQ, reset host+card if the card was initted, unmap registers, and
+ * free the handle. Safe to call with sd == NULL.
+ */
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_unmap_dma(sd);
+		sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+		if (sd->sd3_tuning_reqd == TRUE) {
+			sdstd_3_osclean_tuning(sd);
+			sd->sd3_tuning_reqd = FALSE;
+		}
+		sd->sd3_tuning_disable = FALSE;
+		sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
+		sdstd_free_irq(sd->irq, sd);
+		if (sd->card_init_done)
+			sdstd_reset(sd, 1, 1);
+		if (sd->mem_space) {
+			sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+			sd->mem_space = NULL;
+		}
+
+		sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+ return SDIOH_API_RC_SUCCESS;
+}
+
#if defined(DHD_DEBUG) || defined(BCMDBG)
/* Debug helper: TRUE when the controller's interrupt status register
 * shows a pending card (client) interrupt.
 */
extern bool
sdioh_interrupt_pending(sdioh_info_t *sd)
{
	uint16 status = sdstd_rreg16(sd, SD_IntrStatus);

	return ((status & CLIENT_INTR) != 0);
}
#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
/* Identifiers for the entries of sdioh_iovars[] below.  Numbering starts
 * at 1: varid 0 is used by the table's terminating entry.
 */
enum {
	IOV_MSGLEVEL = 1,
	IOV_BLOCKMODE,
	IOV_BLOCKSIZE,
	IOV_DMA,
	IOV_USEINTS,
	IOV_NUMINTS,
	IOV_NUMLOCALINTS,
	IOV_HOSTREG,
	IOV_DEVREG,
	IOV_DIVISOR,
	IOV_SDMODE,
	IOV_HISPEED,
	IOV_HCIREGS,
	IOV_POWER,
	IOV_POWER_SAVE,
	IOV_YIELDCPU,
	IOV_MINYIELD,
	IOV_FORCERB,
	IOV_CLOCK,
	IOV_UHSIMOD,
	IOV_TUNEMOD,
	IOV_TUNEDIS
};
+
/* Name-to-id table consumed by bcm_iovar_lookup() in sdioh_iovar_op();
 * terminated by the NULL entry.
 */
const bcm_iovar_t sdioh_iovars[] = {
	{"sd_msglevel",	IOV_MSGLEVEL,	0, 0,	IOVT_UINT32,	0 },
	{"sd_blockmode", IOV_BLOCKMODE,	0, 0,	IOVT_BOOL,	0 },
	{"sd_blocksize", IOV_BLOCKSIZE, 0, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
	{"sd_dma",	IOV_DMA,	0, 0,	IOVT_UINT32,	0 },
#ifdef BCMSDYIELD
	{"sd_yieldcpu",	IOV_YIELDCPU,	0, 0,	IOVT_BOOL,	0 },
	{"sd_minyield",	IOV_MINYIELD,	0, 0,	IOVT_UINT32,	0 },
	{"sd_forcerb",	IOV_FORCERB,	0, 0,	IOVT_BOOL,	0 },
#endif
	{"sd_ints",	IOV_USEINTS,	0, 0,	IOVT_BOOL,	0 },
	{"sd_numints",	IOV_NUMINTS,	0, 0,	IOVT_UINT32,	0 },
	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32,	0 },
	{"sd_hostreg",	IOV_HOSTREG,	0, 0,	IOVT_BUFFER,	sizeof(sdreg_t) },
	{"sd_devreg",	IOV_DEVREG,	0, 0,	IOVT_BUFFER,	sizeof(sdreg_t) },
	{"sd_divisor",	IOV_DIVISOR,	0, 0,	IOVT_UINT32,	0 },
	{"sd_power",	IOV_POWER,	0, 0,	IOVT_UINT32,	0 },
	{"sd_power_save",	IOV_POWER_SAVE,	0, 0,	IOVT_UINT32,	0 },
	{"sd_clock",	IOV_CLOCK,	0, 0,	IOVT_UINT32,	0 },
	{"sd_mode",	IOV_SDMODE,	0, 0,	IOVT_UINT32,	100},
	{"sd_highspeed", IOV_HISPEED,	0, 0,	IOVT_UINT32,	0},
	{"sd_uhsimode", IOV_UHSIMOD,	0, 0,	IOVT_UINT32,	0},
#ifdef BCMDBG
	{"sd_hciregs",	IOV_HCIREGS,	0, 0,	IOVT_BUFFER,	0 },
#endif
	{"tuning_mode", IOV_TUNEMOD,	0, 0,	IOVT_UINT32,	0},
	{"sd3_tuning_disable", IOV_TUNEDIS,	0, 0,	IOVT_BOOL,	0},

	{NULL, 0, 0, 0, 0, 0 }
};
+uint8 sdstd_turn_on_clock(sdioh_info_t *sd)
+{
+ sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
+ return 0;
+}
+
+uint8 sdstd_turn_off_clock(sdioh_info_t *sd)
+{
+ sdstd_wreg16(sd, SD_ClockCntrl, sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
+ return 0;
+}
+
/* Get or set one of the iovars declared in sdioh_iovars[] above.
 * For a "get" the result is written to arg (len bytes available);
 * for a "set" the new value arrives in arg/len and params must be NULL.
 * Returns 0 on success or a BCME_xxx error code.
 */
int
sdioh_iovar_op(sdioh_info_t *si, const char *name,
	void *params, int plen, void *arg, uint len, bool set)
{
	const bcm_iovar_t *vi = NULL;
	int bcmerror = 0;
	uint val_size;
	int32 int_val = 0;
	bool bool_val;
	uint32 actionid;

	ASSERT(name);

	/* Get must have return space; Set does not take qualifiers */
	ASSERT(set || (arg && len));
	ASSERT(!set || (!params && !plen));

	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));

	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
		bcmerror = BCME_UNSUPPORTED;
		goto exit;
	}

	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
		goto exit;

	/* XXX Copied from dhd, copied from wl; certainly overkill here? */
	/* Set up params so get and set can share the convenience variables */
	if (params == NULL) {
		params = arg;
		plen = len;
	}

	if (vi->type == IOVT_VOID)
		val_size = 0;
	else if (vi->type == IOVT_BUFFER)
		val_size = len;
	else
		val_size = sizeof(int);

	/* Snapshot a scalar argument (if any) for the cases below. */
	if (plen >= (int)sizeof(int_val))
		bcopy(params, &int_val, sizeof(int_val));

	bool_val = (int_val != 0) ? TRUE : FALSE;
	BCM_REFERENCE(bool_val);

	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
	switch (actionid) {
	case IOV_GVAL(IOV_MSGLEVEL):
		int_val = (int32)sd_msglevel;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_MSGLEVEL):
		sd_msglevel = int_val;
		break;

	case IOV_GVAL(IOV_BLOCKMODE):
		int_val = (int32)si->sd_blockmode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_BLOCKMODE):
		si->sd_blockmode = (bool)int_val;
		/* Haven't figured out how to make non-block mode with DMA */
		if (!si->sd_blockmode)
			si->sd_dma_mode = DMA_MODE_NONE;
		break;

#ifdef BCMSDYIELD
	case IOV_GVAL(IOV_YIELDCPU):
		int_val = sd_yieldcpu;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_YIELDCPU):
		sd_yieldcpu = (bool)int_val;
		break;

	case IOV_GVAL(IOV_MINYIELD):
		int_val = sd_minyield;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_MINYIELD):
		sd_minyield = (bool)int_val;
		break;

	case IOV_GVAL(IOV_FORCERB):
		int_val = sd_forcerb;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_FORCERB):
		sd_forcerb = (bool)int_val;
		break;
#endif /* BCMSDYIELD */

	case IOV_GVAL(IOV_BLOCKSIZE):
		/* On a get, int_val carries the function number to query. */
		if ((uint32)int_val > si->num_funcs) {
			bcmerror = BCME_BADARG;
			break;
		}
		int_val = (int32)si->client_block_size[int_val];
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_BLOCKSIZE):
	{
		/* Set argument packs the target as ((func << 16) | size). */
		uint func = ((uint32)int_val >> 16);
		uint blksize = (uint16)int_val;
		uint maxsize;

		if (func > si->num_funcs) {
			bcmerror = BCME_BADARG;
			break;
		}

		/* XXX These hardcoded sizes are a hack, remove after proper CIS parsing. */
		switch (func) {
		case 0: maxsize = 32; break;
		case 1: maxsize = BLOCK_SIZE_4318; break;
		case 2: maxsize = BLOCK_SIZE_4328; break;
		default: maxsize = 0;
		}
		if (blksize > maxsize) {
			bcmerror = BCME_BADARG;
			break;
		}
		/* Size 0 selects the per-function maximum. */
		if (!blksize) {
			blksize = maxsize;
		}

		/* Now set it */
		sdstd_lock(si);
		bcmerror = set_client_block_size(si, func, blksize);
		sdstd_unlock(si);
		break;
	}

	case IOV_GVAL(IOV_DMA):
		int_val = (int32)si->sd_dma_mode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DMA):
		si->sd_dma_mode = (char)int_val;
		sdstd_set_dma_mode(si, si->sd_dma_mode);
		break;

	case IOV_GVAL(IOV_USEINTS):
		int_val = (int32)si->use_client_ints;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_USEINTS):
		/* Keep the cached interrupt mask in sync with the setting. */
		si->use_client_ints = (bool)int_val;
		if (si->use_client_ints)
			si->intmask |= CLIENT_INTR;
		else
			si->intmask &= ~CLIENT_INTR;
		break;

	case IOV_GVAL(IOV_DIVISOR):
		int_val = (uint32)sd_divisor;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DIVISOR):
		/* Changing the divisor restarts the SD clock at the new rate. */
		sd_divisor = int_val;
		if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
			sd_err(("set clock failed!\n"));
			bcmerror = BCME_ERROR;
		}
		break;

	case IOV_GVAL(IOV_POWER):
		int_val = (uint32)sd_power;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_POWER_SAVE):
		int_val = (uint32)sd_3_power_save;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_POWER):
		/* 1 powers the slot on (full re-init); any other value powers it off. */
		sd_power = int_val;
		if (sd_power == 1) {
			if (sdstd_driver_init(si) != SUCCESS) {
				sd_err(("set SD Slot power failed!\n"));
				bcmerror = BCME_ERROR;
			} else {
				sd_err(("SD Slot Powered ON.\n"));
			}
		} else {
			uint8 pwr = 0;

			pwr = SFIELD(pwr, PWR_BUS_EN, 0);
			sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */
			sd_err(("SD Slot Powered OFF.\n"));
		}
		break;

	case IOV_SVAL(IOV_POWER_SAVE):
		sd_3_power_save = int_val;
		break;

	case IOV_GVAL(IOV_CLOCK):
		int_val = (uint32)sd_clock;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_CLOCK):
		sd_clock = int_val;
		if (sd_clock == 1) {
			sd_info(("SD Clock turned ON.\n"));
			if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
				sd_err(("sdstd_start_clock failed\n"));
				bcmerror = BCME_ERROR;
			}
		} else {
			/* turn off HC clock */
			sdstd_wreg16(si, SD_ClockCntrl,
				sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4));

			sd_info(("SD Clock turned OFF.\n"));
		}
		break;

	case IOV_GVAL(IOV_SDMODE):
		int_val = (uint32)sd_sdmode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_SDMODE):
		sd_sdmode = int_val;

		if (!sdstd_bus_width(si, sd_sdmode)) {
			sd_err(("sdstd_bus_width failed\n"));
			bcmerror = BCME_ERROR;
		}
		break;

	case IOV_GVAL(IOV_HISPEED):
		int_val = (uint32)sd_hiok;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HISPEED):
		sd_hiok = int_val;
		bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok);
		break;

	case IOV_GVAL(IOV_UHSIMOD):
		sd3_trace(("%s: Get UHSI: \n", __FUNCTION__));
		int_val = (int)sd_uhsimode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_UHSIMOD):
	{
		int oldval = sd_uhsimode; /* save old, working value */
		sd3_trace(("%s: Set UHSI: \n", __FUNCTION__));
		/* check if UHSI is supported by card/host */
		if (!(si->card_UHSI_voltage_Supported && si->host_UHSISupported)) {
			sd_err(("%s:UHSI not suppoted!\n", __FUNCTION__));
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		/* check for valid values */
		if (!((int_val == SD3CLKMODE_AUTO) ||
			(int_val == SD3CLKMODE_DISABLED) ||
			((int_val >= SD3CLKMODE_0_SDR12) &&
			(int_val <= SD3CLKMODE_4_DDR50)))) {
			sd_err(("%s:CLK: bad arg!\n", __FUNCTION__));
			bcmerror = BCME_BADARG;
			break;
		}

		/* Apply the new mode; on failure roll back to the saved mode. */
		sd_uhsimode = int_val;
		if (SUCCESS != sdstd_3_clock_wrapper(si)) {
			sd_err(("%s:Error in setting uhsi clkmode:%d,"
				"restoring back to %d\n", __FUNCTION__,
				sd_uhsimode, oldval));
			/* try to set back the old one */
			sd_uhsimode = oldval;
			if (SUCCESS != sdstd_3_clock_wrapper(si)) {
				sd_err(("%s:Error in setting uhsi to old mode;"
					"ignoring:\n", __FUNCTION__));
			}
		}
		break;
	}
#ifdef DHD_DEBUG
	case IOV_SVAL(IOV_TUNEMOD):
	{

		if( int_val == SD_DHD_DISABLE_PERIODIC_TUNING) { /* do tuning single time */
			sd3_trace(("Start tuning from Iovar\n"));
			si->sd3_tuning_reqd = TRUE;
			sdstd_enable_disable_periodic_timer(si, int_val);
			sdstd_lock(si);
			sdstd_3_clk_tuning(si, sdstd_3_get_uhsi_clkmode(si));
			sdstd_unlock(si);
			si->sd3_tuning_reqd = FALSE;
		}
		if (int_val == SD_DHD_ENABLE_PERIODIC_TUNING) {
			sd3_trace(("Enabling automatic tuning\n"));
			si->sd3_tuning_reqd = TRUE;
			sdstd_enable_disable_periodic_timer(si, int_val);
		}
		break;
	}
#endif /* debugging purpose */
	case IOV_GVAL(IOV_NUMINTS):
		int_val = (int32)si->intrcount;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_NUMLOCALINTS):
		int_val = (int32)si->local_intrcount;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_HOSTREG):
	{
		/* XXX Should copy for alignment reasons */
		sdreg_t *sd_ptr = (sdreg_t *)params;

		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
			bcmerror = BCME_BADARG;
			break;
		}

		/* Access width (8/16/32) is implied by the offset's alignment. */
		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
			(sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
			sd_ptr->offset));
		if (sd_ptr->offset & 1)
			int_val = sdstd_rreg8(si, sd_ptr->offset);
		else if (sd_ptr->offset & 2)
			int_val = sdstd_rreg16(si, sd_ptr->offset);
		else
			int_val = sdstd_rreg(si, sd_ptr->offset);

		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}

	case IOV_SVAL(IOV_HOSTREG):
	{
		/* XXX Should copy for alignment reasons */
		sdreg_t *sd_ptr = (sdreg_t *)params;

		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
			bcmerror = BCME_BADARG;
			break;
		}

		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
			(sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
			sd_ptr->offset));
		if (sd_ptr->offset & 1)
			sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
		else if (sd_ptr->offset & 2)
			sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
		else
			sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);

		break;
	}

	case IOV_GVAL(IOV_DEVREG):
	{
		/* XXX Should copy for alignment reasons */
		sdreg_t *sd_ptr = (sdreg_t *)params;
		uint8 data;

		/* CMD52 read of a card (device) register. */
		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
			bcmerror = BCME_SDIO_ERROR;
			break;
		}

		int_val = (int)data;
		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}

	case IOV_SVAL(IOV_DEVREG):
	{
		/* XXX Should copy for alignment reasons */
		sdreg_t *sd_ptr = (sdreg_t *)params;
		uint8 data = (uint8)sd_ptr->value;

		/* CMD52 write of a card (device) register. */
		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
			bcmerror = BCME_SDIO_ERROR;
			break;
		}
		break;
	}

#ifdef BCMDBG
	case IOV_GVAL(IOV_HCIREGS):
	{
		struct bcmstrbuf b;
		bcm_binit(&b, arg, len);

		sdstd_lock(si);
		bcm_bprintf(&b, "IntrStatus:       0x%04x ErrorIntrStatus       0x%04x\n",
			sdstd_rreg16(si, SD_IntrStatus),
			sdstd_rreg16(si, SD_ErrorIntrStatus));
		bcm_bprintf(&b, "IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
			sdstd_rreg16(si, SD_IntrStatusEnable),
			sdstd_rreg16(si, SD_ErrorIntrStatusEnable));
		bcm_bprintf(&b, "IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
			sdstd_rreg16(si, SD_IntrSignalEnable),
			sdstd_rreg16(si, SD_ErrorIntrSignalEnable));
		print_regs(si);

		sdstd_unlock(si);

		if (!b.size)
			bcmerror = BCME_BUFTOOSHORT;
		break;
	}
#endif /* BCMDBG */

	case IOV_SVAL(IOV_TUNEDIS):
		si->sd3_tuning_disable = (bool)int_val;
		break;

	default:
		bcmerror = BCME_UNSUPPORTED;
		break;
	}
exit:

	/* XXX Remove protective lock after clients all clean... */
	return bcmerror;
}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 foo;
+ uint8 *cis = cisd;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ bzero(cis, length);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sdstd_lock(sd);
+ *cis = 0;
+ for (count = 0; count < length; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+ *cis = (uint8)(foo & 0xff);
+ cis++;
+ }
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
/* Single-byte register access via CMD52.
 * rw is SDIOH_READ or SDIOH_WRITE; func/regaddr select the target register.
 * On a read the result is returned through *byte; on a write *byte supplies
 * the value.  Returns SDIOH_API_RC_SUCCESS or a failure status.
 */
extern SDIOH_API_RC
sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
{
	int status = SDIOH_API_RC_SUCCESS;
	uint32 cmd_arg;
	uint32 rsp5;

	sdstd_lock(sd);
	if (rw == SDIOH_READ)
		sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);

	/* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);

#ifdef BCMDBG
	if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
		sd_err(("%s: Entering: ErririntrStatus 0x%x, intstat = 0x%x\n",
			__FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus),
			sdstd_rreg16(sd, SD_IntrStatus)));
	}
#endif
	/* Build the CMD52 argument: function, register address, direction, data. */
	cmd_arg = 0;
	cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
	cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
	cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
	cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
	cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);

	if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
		/* Change to DATA_TRANSFER_IDLE */
		sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
		sdstd_unlock(sd);
		return status;
	}

	/* Validate the R5 response: expected flags value is 0x10. */
	sdstd_cmd_getrsp(sd, &rsp5, 1);
	if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
		sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
			__FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
		status = SDIOH_API_RC_FAIL;
	}
	if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) {
		/* PR 101351: sdiod_aos sleep followed by immediate wakeup
		 * before sdiod_aos takes over has a problem.
		 * While exiting sleep with CMD14, device returning 0x00
		 * Don't flag as error for now for 0x1001f.
		 */
		if (GFIELD(cmd_arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR) {
			sd_err(("%s: rsp5 flags is 0x%x\t %d \n",
				__FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
		}
		status = SDIOH_API_RC_FAIL;
	}

	if (GFIELD(rsp5, RSP5_STUFF)) {
		sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
			__FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
		status = SDIOH_API_RC_FAIL;
	}

	if (rw == SDIOH_READ)
		*byte = GFIELD(rsp5, RSP5_DATA);

	/* Change to DATA_TRANSFER_IDLE */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);

	/* check if we have to do tuning; if so, start */
	sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);

	sdstd_unlock(sd);
	return status;
}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int status;
+
+ sdstd_lock(sd);
+
+ sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
+
+ /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
+
+ if (rw == SDIOH_READ) {
+ status = sdstd_card_regread(sd, func, addr, nbytes, word);
+ } else {
+ status = sdstd_card_regwrite(sd, func, addr, nbytes, *word);
+ }
+
+ /* Change to DATA_TRANSFER_IDLE */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
+
+ /* check if we have to do tuning; if so, start */
+ sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
+
+ sdstd_unlock(sd);
+ return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+#ifdef BCMSDIOH_TXGLOM
+void
+sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
+{
+ BCM_REFERENCE(pkt);
+ sd->glom_info.dma_buf_arr[sd->glom_info.count] = frame;
+ sd->glom_info.nbytes[sd->glom_info.count] = len;
+ /* Convert the frame addr to phy addr for DMA in case of host controller version3 */
+ if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
+ sd->glom_info.dma_phys_arr[sd->glom_info.count] = DMA_MAP(sd->osh,
+ frame,
+ len,
+ DMA_TX, 0, 0);
+ }
+ sd->glom_info.count++;
+}
+
+void
+sdioh_glom_clear(sdioh_info_t *sd)
+{
+ int i;
+ /* DMA_MAP is done per frame only if host controller version is 3 */
+ if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
+ for (i = 0; i < sd->glom_info.count; i++) {
+ DMA_UNMAP(sd->osh,
+ sd->glom_info.dma_phys_arr[i],
+ sd->glom_info.nbytes[i],
+ DMA_TX, 0, 0);
+ }
+ }
+ sd->glom_info.count = 0;
+}
+
+uint
+sdioh_set_mode(sdioh_info_t *sd, uint mode)
+{
+ if (mode == SDPCM_TXGLOM_CPY)
+ sd->txglom_mode = mode;
+ else if ((mode == SDPCM_TXGLOM_MDESC) && (sd->version == HOST_CONTR_VER_3))
+ sd->txglom_mode = mode;
+
+ return (sd->txglom_mode);
+}
+
+bool
+sdioh_glom_enabled(void)
+{
+ return sd_txglom;
+}
+#endif /* BCMSDIOH_TXGLOM */
+
/* Move buflen_u bytes between host memory and the card.
 * The transfer is split into block-mode chunks (multiples of the client
 * block size, at most SD_PAGE bytes) with any remainder sent in byte mode.
 * fix_inc == SDIOH_DATA_FIX keeps the card address fixed (FIFO); otherwise
 * the address advances with each chunk.  pkt is consumed only for tx-glom.
 */
extern SDIOH_API_RC
sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
{
	uint8 is_ddr50 = FALSE;
	int len;
	int buflen = (int)buflen_u;
	bool fifo = (fix_inc == SDIOH_DATA_FIX);
	uint8 *localbuf = NULL, *tmpbuf = NULL;
	bool local_blockmode = sd->sd_blockmode;
	SDIOH_API_RC status = SDIOH_API_RC_SUCCESS;

	sdstd_lock(sd);

	is_ddr50 = (sd_uhsimode == SD3CLKMODE_4_DDR50) ? TRUE : FALSE;

	sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);

	/* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);

	ASSERT(reg_width == 4);
	ASSERT(buflen_u < (1 << 30));
	ASSERT(sd->client_block_size[func]);

#ifdef BCMSDIOH_TXGLOM
	/* Queue every fragment of a glommed packet chain before transferring. */
	if (sd_txglom) {
		while (pkt) {
			sdioh_glom_post(sd, PKTDATA(sd->osh, pkt), pkt, PKTLEN(sd->osh, pkt));
			pkt = PKTNEXT(sd->osh, pkt);
		}
	}
#endif
	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));

	/* Break buffer down into blocksize chunks:
	 * Bytemode: 1 block at a time.
	 * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
	 * Both: leftovers are handled last (will be sent via bytemode).
	 */
	while (buflen > 0) {
		if (local_blockmode) {
			int max_tran_size = SD_PAGE;
#ifdef BCMSDIOH_TXGLOM
			/* There is no alignment requirement for HC3 */
			if ((sd->version == HOST_CONTR_VER_3) && sd_txglom)
				max_tran_size = SD_PAGE * 4;
#endif
			/* Max xfer is Page size */
			len = MIN(max_tran_size, buflen);

			/* Round down to a block boundry */
			if (buflen > sd->client_block_size[func])
				len = (len/sd->client_block_size[func]) *
				        sd->client_block_size[func];
			/* XXX Arasan trashes 3-byte transfers, WAR to add one byte extra. */
			/* XXX In Case of SDIO3.0 DDR50 mode if no of bytes to be
			 * transferred is odd append one more byte to make it even.
			 * Check If odd bytes can come for SDIO_FUNC_2 also.
			 */
			if ((func == SDIO_FUNC_1) && (((len % 4) == 3) || (((len % 2) == 1) &&
			    (is_ddr50))) && ((rw == SDIOH_WRITE) || (rw == SDIOH_READ))) {
				sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
				len++;
				tmpbuf = buffer;
				/* NOTE(review): the bcopy below reads 'len' bytes from the
				 * caller's buffer although only len-1 were requested; this
				 * relies on the source buffer being padded — confirm.
				 */
				if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
					sd_err(("out of memory, malloced %d bytes\n",
					        MALLOCED(sd->osh)));
					status = SDIOH_API_RC_FAIL;
					goto done;
				}
				bcopy(buffer, localbuf, len);
				buffer = localbuf;
			}
		} else {
			/* Byte mode: One block at a time */
			len = MIN(sd->client_block_size[func], buflen);
		}

		if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
			status = SDIOH_API_RC_FAIL;
		}

		/* XXX Restore len and buffer pointer WAR'ed for Arasan 3-byte transfer problem */
		/* XXX WAR for SDIO3.0 DDR50 mode. */
		if (local_blockmode && localbuf) {
			MFREE(sd->osh, localbuf, len);
			len--;
			buffer = tmpbuf;
			sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__));
		}

		if (status == SDIOH_API_RC_FAIL) {
			goto done;
		}

		buffer += len;
		buflen -= len;
		if (!fifo)
			addr += len;
#ifdef BCMSDIOH_TXGLOM
		/* This loop should not come in case of glommed pkts as it is send in
		 * multiple of blocks or total pkt size less than a block
		 */
		if (sd->glom_info.count != 0)
			buflen = 0;
#endif
	}
done:

	/* Change to DATA_TRANSFER_IDLE */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);

	/* check if we have to do tuning; if so, start */
	sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);

	sdstd_unlock(sd);

#ifdef BCMSDIOH_TXGLOM
	if (sd_txglom)
		sdioh_glom_clear(sd);
#endif

	return status;
}
+
+extern SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+ uint offset = 0;
+ uint16 val;
+
+ /* check if upper bank */
+ if (gpio >= SDH_GPIO16) {
+ gpio -= SDH_GPIO16;
+ offset = 2;
+ }
+
+ val = sdstd_rreg16(sd, SD_GPIO_OE + offset);
+ val |= (1 << gpio);
+ sdstd_wreg16(sd, SD_GPIO_OE + offset, val);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+ uint offset = 0;
+ uint16 val;
+
+ /* check if upper bank */
+ if (gpio >= SDH_GPIO16) {
+ gpio -= SDH_GPIO16;
+ offset = 2;
+ }
+
+ val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
+ if (enab == TRUE)
+ val |= (1 << gpio);
+ else
+ val &= ~(1 << gpio);
+ sdstd_wreg16(sd, SD_GPIO_Reg + offset, val);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+ uint offset = 0;
+ uint16 val;
+
+ /* check if upper bank */
+ if (gpio >= SDH_GPIO16) {
+ gpio -= SDH_GPIO16;
+ offset = 2;
+ }
+
+ val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
+ val = (val >> gpio) & 1;
+
+ return (val == 1);
+}
+
+extern SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+ uint rev;
+
+ rev = sdstd_rreg16(sd, SD_HostControllerVersion) >> 8;
+
+ /* Only P206 (fpga rev >= 16) supports gpio */
+ if (rev < 16) {
+ sd_err(("%s: gpio not supported in rev %d \n", __FUNCTION__, rev));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sdstd_wreg16(sd, SD_GPIO_Enable, SDH_GPIO_ENABLE);
+ sdstd_wreg16(sd, SD_GPIO_Enable + 2, SDH_GPIO_ENABLE);
+
+ /* Default to input */
+ sdstd_wreg16(sd, SD_GPIO_OE, 0);
+ sdstd_wreg16(sd, SD_GPIO_OE + 2, 0);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
/* CMD14 sleep control: enab == TRUE requests card sleep, enab == FALSE
 * wakes the card.  Wakeup retries CMD14 until the card responds, then
 * reselects the card with CMD7 and checks for the expected status.
 * Returns SDIOH_API_RC_SUCCESS or SDIOH_API_RC_FAIL.
 */
extern SDIOH_API_RC
sdioh_sleep(sdioh_info_t *sd, bool enab)
{
	SDIOH_API_RC status;
	uint32 cmd_arg = 0, rsp1 = 0;
	int retry = 100;

	sdstd_lock(sd);

	cmd_arg = SFIELD(cmd_arg, CMD14_RCA, sd->card_rca);
	cmd_arg = SFIELD(cmd_arg, CMD14_SLEEP, enab);

	/*
	 * For ExitSleep:
	 *  1) Repeat CMD14 until R1 is received
	 *  2) Send CMD7
	 */
	status = SDIOH_API_RC_FAIL;
	while (retry-- > 0) {
		if ((sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_14, cmd_arg)) == SUCCESS) {
			status = SDIOH_API_RC_SUCCESS;
			break;
		}
		OSL_DELAY(1400);
	}

	if (status == SDIOH_API_RC_FAIL) {
		sd_err(("%s: CMD14: failed! enable:%d\n", __FUNCTION__, enab));
		goto exit;
	}

	sdstd_cmd_getrsp(sd, &rsp1, 1);
	sd_info(("%s: CMD14 OK: cmd_resp:0x%x\n", __FUNCTION__, rsp1));

	/* ExitSleep: Send CMD7 After R1 */
	if (enab == FALSE) {
		/* Select the card */
		cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg)) != SUCCESS) {
			sd_err(("%s: CMD14 send CMD7 failed!\n", __FUNCTION__));
			status = SDIOH_API_RC_FAIL;
			goto exit;
		}

		/* Card must report the expected post-select status. */
		sdstd_cmd_getrsp(sd, &rsp1, 1);
		if (rsp1 != SDIOH_CMD7_EXP_STATUS) {
			sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
			        __FUNCTION__, rsp1));
			status = SDIOH_API_RC_FAIL;
			goto exit;
		}
	}

exit:
	sdstd_unlock(sd);

	return status;
}
+
/* XXX Copied guts of request_byte and cmd_issue. Might make sense to fold this into
 * those by passing another parameter indicating command type (abort). [But maybe
 * keeping it separate is better -- if called internally on command failure it's less
 * recursion to wrap your head around?]
 */
/* Issue an IOAbort for I/O function 'func' (CMD52 write of the function
 * number to F0/SDIOD_CCCR_IOABORT), validate the R5 response in polled
 * mode, and finish with a CMD/DAT line software reset.
 * Returns 0, BCME_SDIO_ERROR, or BCME_NODEVICE (card removed).
 */
static int
sdstd_abort(sdioh_info_t *sd, uint func)
{
	int err = 0;
	int retries;

	uint16 cmd_reg;
	uint32 cmd_arg;
	uint32 rsp5;
	uint8 rflags;

	uint16 int_reg = 0;
	uint16 plain_intstatus;

	/* Argument is write to F0 (CCCR) IOAbort with function number */
	cmd_arg = 0;
	cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0);
	cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT);
	cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE);
	cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
	cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func);

	/* Command is CMD52 write */
	cmd_reg = 0;
	cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY);
	cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
	cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
	cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
	cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT);
	cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52);

	/* XXX Copied from cmd_issue(), but no SPI response handling! */
	if (sd->sd_mode == SDIOH_MODE_SPI) {
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
	}

	/* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */
	/* XXX For a single-threaded driver, what circumstances would result
	 * in cmd_inhibit being on but going off in a short time? Experiment
	 * shows a HW command timeout doesn't leave inhibit on, so maybe a SW
	 * timeout? Then that command should be responsible for clearing...
	 */
	retries = RETRIES_SMALL;
	while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) {
		if (retries == RETRIES_SMALL)
			sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n",
			        __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
		if (!--retries) {
			sd_err(("%s: Command Inhibit timeout, state 0x%08x\n",
			        __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
			if (trap_errs)
				ASSERT(0);
			err = BCME_SDIO_ERROR;
			goto done;
		}
	}

	/* Clear errors from any previous commands */
	if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) {
		sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus));
		sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
	}
	plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus);
	if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) {
		sd_err(("abort: intstatus 0x%04x\n", plain_intstatus));
		if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) {
			sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n"));
		}
		if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) {
			sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n"));
			err = BCME_NODEVICE;
			goto done;
		}
	}

	/* Issue the command */
	sdstd_wreg(sd, SD_Arg0, cmd_arg);
	sdstd_wreg16(sd, SD_Command, cmd_reg);

	/* In interrupt mode return, expect later CMD_COMPLETE interrupt */
	if (!sd->polled_mode)
		return err;

	/* Otherwise, wait for the command to complete */
	retries = RETRIES_LARGE;
	do {
		int_reg = sdstd_rreg16(sd, SD_IntrStatus);
	} while (--retries &&
	         (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
	         (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));

	/* If command completion fails, do a cmd reset and note the error */
	if (!retries) {
		sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n",
		        __FUNCTION__, int_reg,
		        sdstd_rreg16(sd, SD_ErrorIntrStatus),
		        sdstd_rreg(sd, SD_PresentState)));

		sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
		/* NOTE(review): 'retries--' post-decrements, so on a full timeout
		 * retries ends at -1 and the '!retries' check below cannot fire;
		 * the timeout message would be skipped — confirm intended.
		 */
		retries = RETRIES_LARGE;
		do {
			sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
		} while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
		         SW_RESET_CMD)) && retries--);

		if (!retries) {
			sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
		}

		if (trap_errs)
			ASSERT(0);

		err = BCME_SDIO_ERROR;
	}

	/* Clear Command Complete interrupt */
	int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
	sdstd_wreg16(sd, SD_IntrStatus, int_reg);

	/* Check for Errors */
	if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) {
		sd_err(("%s: ErrorintrStatus: 0x%x, "
		        "(intrstatus = 0x%x, present state 0x%x) clearing\n",
		        __FUNCTION__, plain_intstatus,
		        sdstd_rreg16(sd, SD_IntrStatus),
		        sdstd_rreg(sd, SD_PresentState)));

		sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);

		sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
		/* NOTE(review): same post-decrement pattern as above — on timeout
		 * the '!retries' check below cannot fire.
		 */
		retries = RETRIES_LARGE;
		do {
			sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
		} while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
		         SW_RESET_DAT)) && retries--);

		if (!retries) {
			sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
		}

		if (trap_errs)
			ASSERT(0);

		/* ABORT is dataless, only cmd errs count */
		/* XXX But what about busy timeout? Response valid? */
		if (plain_intstatus & ERRINT_CMD_ERRS)
			err = BCME_SDIO_ERROR;
	}

	/* If command failed don't bother looking at response */
	if (err)
		goto done;

	/* Otherwise, check the response */
	sdstd_cmd_getrsp(sd, &rsp5, 1);
	rflags = GFIELD(rsp5, RSP5_FLAGS);

	if (rflags & SD_RSP_R5_ERRBITS) {
		sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags));

		/* The CRC error flag applies to the previous command */
		if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) {
			err = BCME_SDIO_ERROR;
			goto done;
		}
	}

	/* Only IO states 0x10 and 0x20 are acceptable after an abort. */
	if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) &&
	    ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) {
		sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags));
		err = BCME_SDIO_ERROR;
		goto done;
	}

	if (GFIELD(rsp5, RSP5_STUFF)) {
		sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
		        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
		err = BCME_SDIO_ERROR;
		goto done;
	}

done:
	/* Card removal: skip the line resets, the device is gone. */
	if (err == BCME_NODEVICE)
		return err;

	/* XXX As per spec 3.7.1 (and to be safe) do the resets here */
	sdstd_wreg8(sd, SD_SoftwareReset,
	            SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1));

	retries = RETRIES_LARGE;
	do {
		rflags = sdstd_rreg8(sd, SD_SoftwareReset);
		if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
			break;
	} while (--retries);

	if (!retries) {
		sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n",
		        __FUNCTION__, rflags));
		err = BCME_SDIO_ERROR;
	}

	return err;
}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint fnum)
+{
+ int ret;
+
+ sdstd_lock(sd);
+ ret = sdstd_abort(sd, fnum);
+ sdstd_unlock(sd);
+
+ return ret;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+ sdstd_waitlockfree(sd);
+ return SUCCESS;
+}
+
/*
 * Post-command error scan and recovery.
 *
 * Reads SD_ErrorIntrStatus after command 'cmd' (with argument 'arg') was
 * issued. If no error bits are latched, returns SUCCESS. Otherwise the
 * latched bits are cleared, the CMD and/or DAT lines are software-reset as
 * indicated by the error class, and for CMD52/CMD53 an abort is issued to
 * the affected I/O function. Returns ERROR whenever any error bit was set.
 */
static int
sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg)
{
	uint16 regval;
	uint retries;
	uint function = 0;

	/* If no errors, we're done */
	if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
		return SUCCESS;

#ifdef BCMQT
	if (regval == 0xFFFF) {
		/* XXX - Getting bogus errors under QT
		 * XXX - Not sure why; Just ignore for now
		 */
		sd_err(("%s: Bogus SD_ErrorIntrStatus: 0x%x????\n", __FUNCTION__, regval));
		sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
		return SUCCESS;
	}
#endif

	/* Log and clear the latched error bits before attempting recovery. */
	sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
	         __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
	         sdstd_rreg(sdioh_info, SD_PresentState)));
	sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);

	if (cmd == SDIOH_CMD_14) {
		if (regval & ERRINT_CMD_TIMEOUT_BIT) {
			/* PR 101351: sdiod_aos sleep followed by immediate wakeup
			 * before sdiod_aos takes over has a problem.
			 * Getting command timeouts while exiting sleep
			 * with CMD14. Ignore this error due to this PR.
			 */
			regval &= ~ERRINT_CMD_TIMEOUT_BIT;
		}
	}

	/* On command error, issue CMD reset */
	if (regval & ERRINT_CMD_ERRS) {
		sd_trace(("%s: issuing CMD reset\n", __FUNCTION__));
		/* SW_RESET_CMD is self-clearing; poll until the controller drops it. */
		sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
		for (retries = RETRIES_LARGE; retries; retries--)
			if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
				break;
		if (!retries) {
			sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
		}
	}

	/* On data error, issue DAT reset */
	if (regval & ERRINT_DATA_ERRS) {
		if (regval & ERRINT_ADMA_BIT)
			sd_err(("%s:ADMAError: status:0x%x\n",
			        __FUNCTION__, sdstd_rreg(sdioh_info, SD_ADMA_ErrStatus)));
		sd_trace(("%s: issuing DAT reset\n", __FUNCTION__));
		sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
		for (retries = RETRIES_LARGE; retries; retries--)
			if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
				break;
		if (!retries) {
			sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
		}
	}

	/* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */
	if (cmd == SDIOH_CMD_53)
		function = GFIELD(arg, CMD53_FUNCTION);
	else if (cmd == SDIOH_CMD_52) {
		/* PR 101351: sdiod_aos sleep followed by immediate wakeup
		 * before sdiod_aos takes over has a problem.
		 */
		/* Skip the abort for SleepCSR accesses (see PR above). */
		if (GFIELD(arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR)
			function = GFIELD(arg, CMD52_FUNCTION);
	}
	if (function) {
		sd_trace(("%s: requesting abort for function %d after cmd %d\n",
		          __FUNCTION__, function, cmd));
		sdstd_abort(sdioh_info, function);
	}

	if (trap_errs)
		ASSERT(0);

	return ERROR;
}
+
#ifdef BCMINTERNAL
/*
 * Diagnostic self-test hook for internal builds.
 * Currently an unimplemented stub: logs a reminder and returns 0.
 */
extern SDIOH_API_RC
sdioh_test_diag(sdioh_info_t *sd)
{
	sd_err(("%s: Implement me\n", __FUNCTION__));
	return (0);
}
#endif /* BCMINTERNAL */
+
+/*
+ * Private/Static work routines
+ */
+static bool
+sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
+{
+ int retries = RETRIES_LARGE;
+ uchar regval;
+
+ if (!sd)
+ return TRUE;
+
+ sdstd_lock(sd);
+ /* Reset client card */
+ if (client_reset && (sd->adapter_slot != -1)) {
+ if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
+ sd_err(("%s: Cannot write to card reg 0x%x\n",
+ __FUNCTION__, SDIOD_CCCR_IOABORT));
+ else
+ sd->card_rca = 0;
+ }
+
+ /* Reset host controller */
+ if (host_reset) {
+ regval = SFIELD(0, SW_RESET_ALL, 1);
+ sdstd_wreg8(sd, SD_SoftwareReset, regval);
+ do {
+ sd_trace(("%s: waiting for reset\n", __FUNCTION__));
+ } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
+ sdstd_unlock(sd);
+ return (FALSE);
+ }
+
+ /* A reset should reset bus back to 1 bit mode */
+ sd->sd_mode = SDIOH_MODE_SD1;
+ sdstd_set_dma_mode(sd, sd->sd_dma_mode);
+ }
+ sdstd_unlock(sd);
+ return TRUE;
+}
+
+/* Disable device interrupt */
+void
+sdstd_devintr_off(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ if (sd->use_client_ints) {
+ sd->intmask &= ~CLIENT_INTR;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+ }
+}
+
/* Enable device interrupt */
void
sdstd_devintr_on(sdioh_info_t *sd)
{
	/* Must not be called while the host lock is held. */
	ASSERT(sd->lockcount == 0);
	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
	if (sd->use_client_ints) {
		if (sd->version < HOST_CONTR_VER_3) {
			/* Toggle the card-interrupt status-enable bit off and back
			 * on for pre-3.0 controllers — presumably to re-arm a card
			 * interrupt latched while masked; TODO confirm against the
			 * controller errata this targets.
			 */
			uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
			sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
			sdstd_wreg16(sd, SD_IntrStatusEnable, status);
		}

		/* Unmask the client interrupt signal and read back to flush the write. */
		sd->intmask |= CLIENT_INTR;
		sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
		sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
	}
}
+
+#ifdef BCMSDYIELD
+/* Enable/disable other interrupts */
+void
+sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ if (err) {
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
+ }
+
+ sd->intmask |= norm;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ if (sd_forcerb)
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+}
+
+void
+sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ if (err) {
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
+ }
+
+ sd->intmask &= ~norm;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ if (sd_forcerb)
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+}
+#endif /* BCMSDYIELD */
+
/*
 * Probe and initialize the standard SDIO host controller.
 *
 * Identifies the controller by PCI vendor ID, determines the number of
 * slots from SD_SlotInfo, scans each slot's BAR for an inserted card,
 * maps the register window of the first populated slot, caches the
 * controller version and capability registers, selects the DMA mode,
 * resets the host, and records default bus/polling state.
 *
 * Returns SUCCESS, or ERROR/-1 if no supported controller or populated
 * slot is found.
 */
static int
sdstd_host_init(sdioh_info_t *sd)
{
	int num_slots, full_slot;
	uint8 reg8;
	uint32 card_ins;
	int slot, first_bar = 0;
	bool detect_slots = FALSE;
#ifdef _WIN32
	NDIS_PHYSICAL_ADDRESS bar;
#else
	uint bar;
#endif

	/* Check for Arasan ID */
	if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) {
		sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__));
		sd->controller_type = SDIOH_TYPE_ARASAN_HDK;
		detect_slots = TRUE;
		/* Controller supports SDMA, so turn it on here. */
		sd->sd_dma_mode = DMA_MODE_SDMA;
	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
		sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
		sd->controller_type = SDIOH_TYPE_BCM27XX;
		detect_slots = FALSE;
	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) {
		sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__));
		sd->controller_type = SDIOH_TYPE_TI_PCIXX21;
		detect_slots = TRUE;
	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) {
		sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n",
		         __FUNCTION__));
		sd->controller_type = SDIOH_TYPE_RICOH_R5C822;
		detect_slots = TRUE;
	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) {
		sd_info(("%s: JMicron Standard SDIO Host Controller\n",
		         __FUNCTION__));
		sd->controller_type = SDIOH_TYPE_JMICRON;
		detect_slots = TRUE;
#ifdef BCMINTERNAL
	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JINVANI) {
		sd_info(("%s: Found Jinvani Standard SDIO Host Controller\n", __FUNCTION__));
		detect_slots = FALSE;
		sd->controller_type = SDIOH_TYPE_JINVANI_GOLD;
#endif /* BCMINTERNAL */
	} else {
		/* Unrecognized vendor: refuse to attach. */
		return ERROR;
	}

	/*
	 * Determine num of slots
	 * Search each slot
	 */

	first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7;
	num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4;
	num_slots &= 7;
	num_slots++;   /* map bits to num slots according to spec */

	/* XXX Since the sdio20h core does not present the proper SD_SlotInfo
	 * register at PCI config space offset 0x40, fake it here. Also,
	 * set the BAR0 window to point to the sdio20h core.
	 */
	if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
	    ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
		sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__));
		/* Set BAR0 Window to SDIOSTH core */
		OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);

		/* Set defaults particular to this controller. */
		detect_slots = TRUE;
		num_slots = 1;
		first_bar = 0;

		/* Controller supports ADMA2, so turn it on here. */
		sd->sd_dma_mode = DMA_MODE_ADMA2;
	}

	/* Map in each slot on the board and query it to see if a
	 * card is inserted. Use the first populated slot found.
	 */
	if (sd->mem_space) {
		/* Drop any stale mapping from a previous init. */
		sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
		sd->mem_space = NULL;
	}

	full_slot = -1;

	for (slot = 0; slot < num_slots; slot++) {
/* XXX :Ugly define, is there a better way */
#ifdef _WIN32
		bar.HighPart = 0;
		bar.LowPart = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0
			+ (4*(slot + first_bar)), 4);
		sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
			(int32)&bar, SDIOH_REG_WINSZ);
#else
		bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
		sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
			(uintptr)bar, SDIOH_REG_WINSZ);
#endif

		sd->adapter_slot = -1;

		/* Either probe the present-state register or assume occupied,
		 * depending on the controller identified above.
		 */
		if (detect_slots) {
			card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
		} else {
			card_ins = TRUE;
		}

		if (card_ins) {
			sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
			if (full_slot < 0)
				full_slot = slot;
		} else {
			sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
		}

		/* Unmap the probe window; the chosen slot is re-mapped below. */
		if (sd->mem_space) {
			sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
			sd->mem_space = NULL;
		}
	}

	if (full_slot < 0) {
		sd_err(("No slots on SDIO controller are populated\n"));
		return -1;
	}

/* XXX :Ugly define, is there a better way */
#ifdef _WIN32
	bar.HighPart = 0;
	bar.LowPart = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
	sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (int32)&bar, SDIOH_REG_WINSZ);
#else
	bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
	sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
#endif

	sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
		full_slot,
		(full_slot + first_bar),
		OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
		sd->mem_space));

	/* NOTE(review): adapter_slot is assigned again below after the
	 * version/caps reads — this first assignment appears redundant.
	 */
	sd->adapter_slot = full_slot;

	/* Host-controller spec version: 0 = 1.0, 1 = 2.0, 2 = 3.0. */
	sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
	switch (sd->version) {
	case 0:
		sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
			sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
		break;
	case 1:
		sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
			sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
		break;
	case 2:
		sd_err(("Host Controller version 3.0, Vendor Revision: 0x%02x\n",
			sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
		break;
	default:
		sd_err(("%s: Host Controller version 0x%02x not supported.\n",
			__FUNCTION__, sd->version));
		break;
	}

	sd->caps = sdstd_rreg(sd, SD_Capabilities);	/* Cache this for later use */
	/* MSB 32 bits of caps supported in sdio 3.0 */
	sd->caps3 = sdstd_rreg(sd, SD_Capabilities3); /* Cache this for later use */
	/* NOTE(review): sd->curr_caps is logged here but only assigned two
	 * lines below, so this trace may show a stale value.
	 */
	sd3_trace(("sd3: %s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
	sd3_trace(("sd3: %s: caps3: 0x%x\n", __FUNCTION__, sd->caps3));
	sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);

	sd_info(("%s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));

	sdstd_set_dma_mode(sd, sd->sd_dma_mode);

#if defined(BCMINTERNAL)
	if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
	    ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
		sd_err(("* * * SDIO20H FPGA Build Date: 0x%04x\n", sdstd_rreg(sd, 0x110)));
	}

	if (GFIELD(sd->caps, CAP_MAXBLOCK) == 0x3) {
		sd_info(("SD HOST CAPS: Max block size is INVALID\n"));
	} else {
		sd_info(("SD HOST CAPS: Max block size is %d bytes\n",
			512 << GFIELD(sd->caps, CAP_MAXBLOCK)));
	}

	sd_info(("SD HOST CAPS: 64-bit DMA is %ssupported.\n",
		GFIELD(sd->caps, CAP_64BIT_HOST) ? "" : "not "));
	sd_info(("SD HOST CAPS: Suspend/Resume is %ssupported.\n",
		GFIELD(sd->caps, CAP_SUSPEND) ? "" : "not "));

	sd_err(("SD HOST CAPS: SD Host supports "));
	if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
		sd_err(("3.3V"));
		if (GFIELD(sd->curr_caps, CAP_CURR_3_3)) {
			sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_3_3)));
		}
	}
	if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
		sd_err((", 3.0V"));
		if (GFIELD(sd->curr_caps, CAP_CURR_3_0)) {
			sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_3_0)));
		}
	}
	if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
		sd_err((", 1.8V"));
		if (GFIELD(sd->curr_caps, CAP_CURR_1_8)) {
			sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_1_8)));
		}
	}
	sd_err(("\n"));
#endif /* defined(BCMINTERNAL) */

	/* Reset the host controller only (not the card). */
	sdstd_reset(sd, 1, 0);

	/* Read SD4/SD1 mode */
	if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
		if (reg8 & SD4_MODE) {
			sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
			        __FUNCTION__, reg8));
		}
	}

	/* Default power on mode is SD1 */
	sd->sd_mode = SDIOH_MODE_SD1;
	sd->polled_mode = TRUE;
	sd->host_init_done = TRUE;
	sd->card_init_done = FALSE;
	sd->adapter_slot = full_slot;

	/* XXX: If sd_uhsimode is disabled, which means that, use the HC in SDIO 2.0 mode. */
	if (sd_uhsimode == SD3CLKMODE_DISABLED) {
		sd->version = HOST_CONTR_VER_2;
		sd3_trace(("%s:forcing to SDIO HC 2.0\n", __FUNCTION__));
	}

	if (sd->version == HOST_CONTR_VER_3) {
		/* read host ctrl 2 */
		uint16 reg16 = 0;
		sd3_trace(("sd3: %s: HC3: reading additional regs\n", __FUNCTION__));

		reg16 = sdstd_rreg16(sd, SD3_HostCntrl2);

		sd_info(("%s: HCtrl: 0x%x; HCtrl2:0x%x\n", __FUNCTION__, reg8, reg16));
		BCM_REFERENCE(reg16);

		/* if HC supports 1.8V and one of the SDR/DDR modes, hc uhci support is PRESENT */
		if ((GFIELD(sd->caps, CAP_VOLT_1_8)) &&
			(GFIELD(sd->caps3, CAP3_SDR50_SUP) ||
			GFIELD(sd->caps3, CAP3_SDR104_SUP) ||
			GFIELD(sd->caps3, CAP3_DDR50_SUP)))
			sd->host_UHSISupported = 1;
	}

#ifdef BCMQT
	{
		uint32 intmask;

		/* FIX: force interrupts with QT sdio20 host */
		/* pci cw [expr $def(configbase) +0x95] 1 2 */
		intmask = OSL_PCI_READ_CONFIG(sd->osh, PCI_INT_MASK, 4);
		intmask |= 0x0200;
		OSL_PCI_WRITE_CONFIG(sd->osh, PCI_INT_MASK, 4, intmask);
	}
#endif
	return (SUCCESS);
}
+#define CMD5_RETRIES 200
+static int
+get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
+{
+ int retries, status;
+
+ /* Get the Card's Operation Condition. Occasionally the board
+ * takes a while to become ready
+ */
+ retries = CMD5_RETRIES;
+ do {
+ *cmd_rsp = 0;
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
+ != SUCCESS) {
+ sd_err(("%s: CMD5 failed\n", __FUNCTION__));
+ return status;
+ }
+ sdstd_cmd_getrsp(sd, cmd_rsp, 1);
+ if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
+ sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
+ } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
+ if (!retries)
+ return ERROR;
+
+ return (SUCCESS);
+}
+
/*
 * Bring up the SDIO card in the selected slot.
 *
 * Clears pending interrupts, enables status reporting, powers the card
 * (1.8V-first for UHS-I-capable hosts, highest voltage otherwise),
 * assigns and selects an RCA (CMD3/CMD7 in SD mode), disables the DAT3
 * card-detect pull-up, enables the I/O functions, sets the bus width and
 * per-function block sizes, enables card interrupts in CCCR, and starts
 * the clock. Returns SUCCESS or ERROR / a command status on failure.
 */
static int
sdstd_client_init(sdioh_info_t *sd)
{
	uint32 cmd_arg, cmd_rsp;
	int status;
	uint8 fn_ints;
	uint32 regdata;
	uint16 powerstat = 0;

#ifdef BCMINTERNAL
#ifdef NOTUSED
	/* Handy routine to dump capabilities. */
	static char caps_buf[500];
	parse_caps(sd->caps, caps_buf, 500);
	sd_err((caps_buf));
#endif /* NOTUSED */
#endif /* BCMINTERNAL */

	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));

	/* Clear any pending ints */
	sdstd_wreg16(sd, SD_IntrStatus, 0x1fff);
	sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);

	/* Enable both Normal and Error Status. This does not enable
	 * interrupts, it only enables the status bits to
	 * become 'live'
	 */

	if (!sd->host_UHSISupported)
		sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
	else
	{
		/* INT_x interrupts, but DO NOT enable signalling [enable retuning
		 * will happen later]
		 */
		sdstd_wreg16(sd, SD_IntrStatusEnable, 0x0fff);
	}
	sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);

	sdstd_wreg16(sd, SD_IntrSignalEnable, 0);          /* Disable ints for now. */

	if (sd->host_UHSISupported) {
		/* when HC is started for SDIO 3.0 mode, start in lowest voltage mode first. */
		powerstat = sdstd_start_power(sd, 1);
		if (SDIO_OCR_READ_FAIL == powerstat) {
			/* This could be because the device is 3.3V, and possible does
			 * not have sdio3.0 support. So, try in highest voltage
			 */
			/* NOTE(review): despite the message, no retry at the higher
			 * voltage happens here — this path returns ERROR directly.
			 */
			sd_err(("sdstd_start_power: legacy device: trying highest voltage\n"));
			sd_err(("%s failed\n", __FUNCTION__));
			return ERROR;
		} else if (TRUE != powerstat) {
			sd_err(("sdstd_start_power failed\n"));
			return ERROR;
		}
	} else
	/* XXX legacy driver: start in highest voltage mode first.
	 * CAUTION: trying to start a legacy dhd with sdio3.0HC and sdio3.0 device could
	 * burn the sdio3.0device if the device has started in 1.8V.
	 */
	if (TRUE != sdstd_start_power(sd, 0)) {
		sd_err(("sdstd_start_power failed\n"));
		return ERROR;
	}

	if (sd->num_funcs == 0) {
		sd_err(("%s: No IO funcs!\n", __FUNCTION__));
		return ERROR;
	}

	/* In SPI mode, issue CMD0 first */
	if (sd->sd_mode == SDIOH_MODE_SPI) {
		cmd_arg = 0;
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
		    != SUCCESS) {
			sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
			return status;
		}
	}

	if (sd->sd_mode != SDIOH_MODE_SPI) {
		uint16 rsp6_status;

		/* Card is operational. Ask it to send an RCA */
		cmd_arg = 0;
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
		    != SUCCESS) {
			sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
			return status;
		}

		/* Verify the card status returned with the cmd response */
		sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
		rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
		if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
		    GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
		    GFIELD(rsp6_status, RSP6STAT_ERROR)) {
			sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
			        __FUNCTION__, rsp6_status));
			return ERROR;
		}

		/* Save the Card's RCA */
		sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
		sd_info(("RCA is 0x%x\n", sd->card_rca));

		if (rsp6_status)
			sd_err(("raw status is 0x%x\n", rsp6_status));

		/* Select the card */
		cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
		    != SUCCESS) {
			sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
			return status;
		}
		sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
		if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
			sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
			        __FUNCTION__, cmd_rsp));
			return ERROR;
		}
	}

	/* Disable default/power-up device Card Detect (CD) pull up resistor on DAT3
	 * via CCCR bus interface control register.  Set CD disable bit while leaving
	 * others alone.
	 */
	if (sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata) != SUCCESS) {
		sd_err(("Disabling card detect: read of device CCCR BICTRL register failed\n"));
		return ERROR;
	}
	regdata |= BUS_CARD_DETECT_DIS;

	if (sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata) != SUCCESS) {
		sd_err(("Disabling card detect: write of device CCCR BICTRL register failed\n"));
		return ERROR;
	}

	sdstd_card_enablefuncs(sd);

	if (!sdstd_bus_width(sd, sd_sdmode)) {
		sd_err(("sdstd_bus_width failed\n"));
		return ERROR;
	}

	/* Function 1 block size, then function 2 if present. */
	set_client_block_size(sd, 1, sd_f1_blocksize);
	fn_ints = INTR_CTL_FUNC1_EN;

	if (sd->num_funcs >= 2) {
		/* XXX Device side can't handle 512 yet */
		set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
		fn_ints |= INTR_CTL_FUNC2_EN;
	}

	/* Enable/Disable Client interrupts */
	/* Turn on here but disable at host controller? */
	if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
	                        (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
		sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
		return ERROR;
	}

	if (sd_uhsimode != SD3CLKMODE_DISABLED) {
		/* Switch to High-speed clocking mode if both host and device support it */
		if (sdstd_3_clock_wrapper(sd) != SUCCESS) {
			sd_err(("sdstd_3_clock_wrapper failed\n"));
			return ERROR;
		}
	} else
	{
		if (sdstd_clock_wrapper(sd)) {
			sd_err(("sdstd_start_clock failed\n"));
			return ERROR;
		}
	}
	sd->card_init_done = TRUE;

	return SUCCESS;
}
+
+static int
+sdstd_clock_wrapper(sdioh_info_t *sd)
+{
+ sd_trace(("%s:Enter\n", __FUNCTION__));
+ /* After configuring for High-Speed mode, set the desired clock rate. */
+ sdstd_set_highspeed_mode(sd, (bool)sd_hiok);
+
+ if (FALSE == sdstd_start_clock(sd, (uint16)sd_divisor)) {
+ sd_err(("sdstd_start_clock failed\n"));
+ return ERROR;
+ }
+ return SUCCESS;
+}
+
/*
 * SDIO 3.0 clock bring-up.
 *
 * If the card supports UHS-I voltages, find a clock mode supported by
 * both host and card (falling back to SD3CLKMODE_AUTO when allowed by
 * sd3_autoselect_uhsi_max) and program it; otherwise, or when the card
 * is legacy-only, fall back to sdstd_clock_wrapper(). Returns SUCCESS
 * or ERROR.
 */
static int
sdstd_3_clock_wrapper(sdioh_info_t *sd)
{
	int retclk = 0;
	sd_info(("%s: Enter\n", __FUNCTION__));
	if (sd->card_UHSI_voltage_Supported) {
		/* check if clk config requested is supported by both host and target. */
		retclk = sdstd_3_get_matching_uhsi_clkmode(sd, sd_uhsimode);

		/* if no match for requested caps, try to get the max match possible */
		if (retclk == -1) {
			/* if auto enabled */
			if (sd3_autoselect_uhsi_max == 1) {
				retclk = sdstd_3_get_matching_uhsi_clkmode(sd, SD3CLKMODE_AUTO);
				/* still NO match */
				if (retclk == -1) {
					/* NO match with HC and card capabilities. Now try the
					 * High speed/legacy mode if possible.
					 */

					sd_err(("%s: Not able to set requested clock\n",
					        __FUNCTION__));
					return ERROR;
				}
			} else {
				/* means user doesn't want auto clock. So return ERROR */
				sd_err(("%s: Not able to set requested clock, Try"
				        "auto mode\n", __FUNCTION__));
				return ERROR;
			}
		}

		if (retclk != -1) {
			/* set the current clk to be selected clock */
			sd_uhsimode = retclk;

			if (BCME_OK != sdstd_3_set_highspeed_uhsi_mode(sd, sd_uhsimode)) {
				sd_err(("%s: Not able to set requested clock\n", __FUNCTION__));
				return ERROR;
			}
		} else {
			/* NOTE(review): unreachable — every retclk == -1 path above
			 * already returned ERROR, so this legacy fallback never runs.
			 */
			/* try legacy mode */
			if (SUCCESS != sdstd_clock_wrapper(sd)) {
				sd_err(("sdstd_start_clock failed\n"));
				return ERROR;
			}
		}
	} else {
		sd_info(("%s: Legacy Mode Clock\n", __FUNCTION__));
		/* try legacy mode */
		if (SUCCESS != sdstd_clock_wrapper(sd)) {
			sd_err(("%s sdstd_clock_wrapper failed\n", __FUNCTION__));
			return ERROR;
		}
	}
	return SUCCESS;
}
+
+int
+sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode)
+{
+ int status, lcount = 0, brr_count = 0;
+ uint16 val1 = 0, bufready = 0;
+ uint32 val2 = 0;
+ uint8 phase_info_local = 0;
+
+ sd3_trace(("sd3: %s: Enter\n", __FUNCTION__));
+ /* if (NOT SDR104) OR
+ * (SDR_50 AND sdr50_tuning_reqd is NOT enabled)
+ * return success, as tuning not reqd.
+ */
+ if (!sd->sd3_tuning_reqd) {
+ sd_info(("%s: Tuning NOT reqd!\n", __FUNCTION__));
+ return SUCCESS;
+ }
+
+ /* execute tuning procedure */
+
+ /* enable Buffer ready status. [donot enable the interrupt right now] */
+ /* Execute tuning */
+ sd_trace(("sd3: %s: Execute tuning\n", __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 1);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+
+ do {
+ sd3_trace(("sd3: %s: cmd19 issue\n", __FUNCTION__));
+ /* Issue cmd19 */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_19, 0))
+ != SUCCESS) {
+ sd_err(("%s: CMD19 failed\n", __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ return status;
+ }
+
+ /* wait for buffer read ready */
+ brr_count = 0;
+ do {
+ bufready = sdstd_rreg16(sd, SD_IntrStatus);
+
+ if (GFIELD(bufready, INTSTAT_BUF_READ_READY))
+ break;
+
+ /* delay after checking bufready becuase INTSTAT_BUF_READ_READY
+ might have been most likely set already in the first check
+ */
+ OSL_DELAY(1);
+ } while (++brr_count < CLKTUNING_MAX_BRR_RETRIES);
+
+ /* buffer read ready timedout */
+ if (brr_count == CLKTUNING_MAX_BRR_RETRIES) {
+ sd_err(("%s: TUNINGFAILED: BRR response timedout!\n",
+ __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ return ERROR;
+ }
+
+ /* In response to CMD19 card will send 64 magic bytes.
+ * Current Aizyc HC h/w doesn't auto clear those bytes.
+ * So read 64 bytes send by card.
+ * Aizyc need to implement in hw to do an auto clear.
+ */
+ if (sd3_sw_read_magic_bytes == TRUE)
+ {
+ uint8 l_cnt_1 = 0;
+ uint32 l_val_1 = 0;
+ for (l_cnt_1 = 0; l_cnt_1 < 16; l_cnt_1++) {
+ l_val_1 = sdstd_rreg(sd, SD_BufferDataPort0);
+ sd_trace(("%s:l_val_1 = 0x%x", __FUNCTION__, l_val_1));
+ }
+ BCM_REFERENCE(l_val_1);
+ }
+
+ /* clear BuffReadReady int */
+ bufready = SFIELD(bufready, INTSTAT_BUF_READ_READY, 1);
+ sdstd_wreg16(sd, SD_IntrStatus, bufready);
+
+ /* wait before continuing */
+ /* OSL_DELAY(PER_TRY_TUNING_DELAY_MS * 1000); */ /* Not required */
+
+ /* check execute tuning bit */
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ if (!GFIELD(val1, HOSTCtrl2_EXEC_TUNING)) {
+ /* done tuning, break from loop */
+ break;
+ }
+
+ /* max tuning iterations exceeded */
+ if (lcount++ > MAX_TUNING_ITERS) {
+ sd_err(("%s: TUNINGFAILED: Max tuning iterations"
+ "exceeded!\n", __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ return ERROR;
+ }
+ } while (1);
+
+ val2 = sdstd_rreg(sd, SD3_Tuning_Info_Register);
+ phase_info_local = ((val2>>15)& 0x7);
+ sd_info(("Phase passed info: 0x%x\n", (val2>>8)& 0x3F));
+ sd_info(("Phase selected post tune: 0x%x\n", phase_info_local));
+
+ if (phase_info_local > SDSTD_MAX_TUNING_PHASE) {
+ sd_err(("!!Phase selected:%x\n", phase_info_local));
+ }
+
+ /* check sampling clk select */
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ if (!GFIELD(val1, HOSTCtrl2_SAMPCLK_SEL)) {
+ /* error in selecting clk */
+ sd_err(("%s: TUNINGFAILED: SamplClkSel failed!\n", __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ return ERROR;
+ }
+/* done: */
+ sd_info(("%s: TUNING Success!\n", __FUNCTION__));
+ return SUCCESS;
+}
+
+void
+sdstd_3_enable_retuning_int(sdioh_info_t *sd)
+{
+ uint16 raw_int;
+ unsigned long flags;
+
+ sdstd_os_lock_irqsave(sd, &flags);
+ raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
+ sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int | HC_INTR_RETUNING));
+ /* Enable retuning status */
+ raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int | HC_INTR_RETUNING));
+ sdstd_os_unlock_irqrestore(sd, &flags);
+}
+
+void
+sdstd_3_disable_retuning_int(sdioh_info_t *sd)
+{
+ uint16 raw_int;
+ unsigned long flags;
+
+ sdstd_os_lock_irqsave(sd, &flags);
+ sd->intmask &= ~HC_INTR_RETUNING;
+ raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
+ sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int & (~HC_INTR_RETUNING)));
+ /* Disable retuning status */
+ raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int & (~HC_INTR_RETUNING)));
+ sdstd_os_unlock_irqrestore(sd, &flags);
+}
+
+bool
+sdstd_3_is_retuning_int_set(sdioh_info_t *sd)
+{
+ uint16 raw_int;
+
+ raw_int = sdstd_rreg16(sd, SD_IntrStatus);
+
+ if (GFIELD(raw_int, INTSTAT_RETUNING_INT))
+ return TRUE;
+
+ return FALSE;
+}
+
/*
	Assumption: sd3ClkMode is checked to be present in both host/card
	capabilities before entering this function. VALID values for sd3ClkMode
	in this function: SD3CLKMODE_2, 3, 4 [0 and 1 NOT supported as
	they are legacy] For that, need to call
	sdstd_3_get_matching_uhsi_clkmode()
*/
/*
 * Program host and card for the given UHS-I clock mode:
 * write driver strength and Bus Speed Select to the card's CCCR, gate
 * the SD clock, enable high-speed in Host Control, select the UHS mode
 * (and preset/driver strength per sd3_sw_override1) in Host Control 2,
 * restart the clock, and run initial tuning when the mode requires it.
 * Returns BCME_OK, BCME_ERROR, BCME_SDIO_ERROR, or ERROR.
 */
static int
sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode)
{
	uint32 drvstrn;
	int status;
	uint8 hc_reg8;
	uint16 val1 = 0, presetval = 0;
	uint32 regdata;

	sd3_trace(("sd3: %s:enter:clkmode:%d\n", __FUNCTION__, sd3ClkMode));

	hc_reg8 = sdstd_rreg8(sd, SD_HostCntrl);

	if (HOST_SDR_UNSUPP == sd->global_UHSI_Supp) {
		sd_err(("%s:Trying to set clk with unsupported global support\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* get [double check, as this is already done in
		sdstd_3_get_matching_uhsi_clkmode] drvstrn
	*/
	if (!sdstd_3_get_matching_drvstrn(sd, sd3ClkMode, &drvstrn, &presetval)) {
		/* NOTE(review): drvstrn/presetval are outputs of the failed call
		 * above and may be unset here — the logged values may be garbage;
		 * confirm against sdstd_3_get_matching_drvstrn's failure behavior.
		 */
		sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset"
		        "val:0x%x\n", __FUNCTION__, drvstrn, presetval));
		return BCME_SDIO_ERROR;
	}

	/* also set driver type select in CCCR */
	if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
	                                  1, drvstrn)) != BCME_OK) {
		sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in card Failed!\n", __FUNCTION__));
		return BCME_SDIO_ERROR;
	}

	/* ********** change Bus speed select in device */
	if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
	                                 1, &regdata)) != SUCCESS) {
		sd_err(("%s:FAILED 1\n", __FUNCTION__));
		return BCME_SDIO_ERROR;
	}
	sd_info(("Attempting to change BSS.current val:0x%x\n", regdata));

	if (regdata & SDIO_SPEED_SHS) {
		sd_info(("Device supports High-Speed mode.\n"));
		/* clear existing BSS */
		regdata &= ~0xE;

		/* Bus Speed Select field is bits [3:1] of the speed register. */
		regdata |= (sd3ClkMode << 1);

		sd_info(("Writing %08x to Card at %08x\n",
		         regdata, SDIOD_CCCR_SPEED_CONTROL));
		if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
		                                  1, regdata)) != BCME_OK) {
			sd_err(("%s:FAILED 2\n", __FUNCTION__));
			return BCME_SDIO_ERROR;
		}

		/* Read back to confirm the card accepted the setting. */
		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
		                                 1, &regdata)) != BCME_OK) {
			sd_err(("%s:FAILED 3\n", __FUNCTION__));
			return BCME_SDIO_ERROR;
		}

		sd_info(("Read %08x from Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
	}
	else {
		sd_err(("Device does not support High-Speed Mode.\n"));
	}

	/* SD Clock Enable = 0 : gate the clock before reprogramming the host. */
	sdstd_wreg16(sd, SD_ClockCntrl,
	             sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));

	/* set to HighSpeed mode */
	/* TBD: is these to change SD_HostCntrl reqd for UHSI? */
	hc_reg8 = SFIELD(hc_reg8, HOST_HI_SPEED_EN, 1);
	sdstd_wreg8(sd, SD_HostCntrl, hc_reg8);

	/* set UHS Mode select in HC2 and also set preset */
	val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
	val1 = SFIELD(val1, HOSTCtrl2_UHSMODE_SEL, sd3ClkMode);
	if (TRUE != sd3_sw_override1) {
		val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 1);
	} else {
		/* set hC registers manually using the retreived values */
		/* *set drvstrn */
		val1 = SFIELD(val1, HOSTCtrl2_DRIVSTRENGTH_SEL,
		              GFIELD(presetval, PRESET_DRIVR_SELECT));
		val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 0);
	}

	/* finally write Hcontrol2 */
	sdstd_wreg16(sd, SD3_HostCntrl2, val1);

	sd_err(("%s:HostCtrl2 final value:0x%x\n", __FUNCTION__, val1));

	/* start clock : clk will be enabled inside. */
	if (FALSE == sdstd_start_clock(sd, GFIELD(presetval, PRESET_CLK_DIV))) {
		sd_err(("sdstd_start_clock failed\n"));
		return ERROR;
	}

	/* execute first tuning procedure */
	if (!sd3_sw_override1) {
		if (SD3_TUNING_REQD(sd, sd3ClkMode)) {
			sd_err(("%s: Tuning start..\n", __FUNCTION__));
			sd->sd3_tuning_reqd = TRUE;
			/* TBD: first time: enabling INT's could be problem? */
			sdstd_3_start_tuning(sd);
		}
		else
			sd->sd3_tuning_reqd = FALSE;
	}

	return BCME_OK;
}
+
+/* Check & do tuning if required */
+void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param)
+{
+ int retries = 0;
+
+ if (!sd->sd3_tuning_disable && sd->sd3_tuning_reqd) {
+ sd3_trace(("sd3: %s: tuning reqd\n", __FUNCTION__));
+ if (tuning_param == CHECK_TUNING_PRE_DATA) {
+ if (sd->sd3_tun_state == TUNING_ONGOING) {
+ retries = RETRIES_SMALL;
+ /* check if tuning is already going on */
+ while ((GFIELD(sdstd_rreg(sd, SD3_HostCntrl2),
+ HOSTCtrl2_EXEC_TUNING)) && retries--) {
+ if (retries == RETRIES_SMALL)
+ sd_err(("%s: Waiting for Tuning to complete\n",
+ __FUNCTION__));
+ }
+
+ if (!retries) {
+ sd_err(("%s: Tuning wait timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ }
+ } else if (sd->sd3_tun_state == TUNING_START) {
+ /* check and start tuning if required. */
+ sd3_trace(("sd3 : %s : Doing Tuning before Data Transfer\n",
+ __FUNCTION__));
+ sdstd_3_start_tuning(sd);
+ }
+ } else if (tuning_param == CHECK_TUNING_POST_DATA) {
+ if (sd->sd3_tun_state == TUNING_START_AFTER_DAT) {
+ sd3_trace(("sd3: %s: tuning start\n", __FUNCTION__));
+ /* check and start tuning if required. */
+ sdstd_3_start_tuning(sd);
+ }
+ }
+ }
+}
+/* Need to run this function in interrupt-disabled context */
+bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd)
+{
+ sd3_trace(("sd3: %s:\n", __FUNCTION__));
+
+ /* if already initiated, just return without anything */
+ if ((sd->sd3_tun_state == TUNING_START) ||
+ (sd->sd3_tun_state == TUNING_ONGOING) ||
+ (sd->sd3_tun_state == TUNING_START_AFTER_DAT)) {
+ /* do nothing */
+ return FALSE;
+ }
+
+ if (sd->sd3_dat_state == DATA_TRANSFER_IDLE) {
+ sd->sd3_tun_state = TUNING_START; /* tuning to be started by the tasklet */
+ return TRUE;
+ } else {
+ /* tuning to be started after finishing the existing data transfer */
+ sd->sd3_tun_state = TUNING_START_AFTER_DAT;
+ }
+ return FALSE;
+}
+
+int sdstd_3_get_data_state(sdioh_info_t *sd)
+{
+ return sd->sd3_dat_state;
+}
+
+void sdstd_3_set_data_state(sdioh_info_t *sd, int state)
+{
+ sd->sd3_dat_state = state;
+}
+
+int sdstd_3_get_tune_state(sdioh_info_t *sd)
+{
+ return sd->sd3_tun_state;
+}
+
+void sdstd_3_set_tune_state(sdioh_info_t *sd, int state)
+{
+ sd->sd3_tun_state = state;
+}
+
+uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd)
+{
+ if (sd_tuning_period == CAP3_RETUNING_TC_OTHER) {
+ return GFIELD(sd->caps3, CAP3_RETUNING_TC);
+ } else {
+ return (uint8)sd_tuning_period;
+ }
+}
+
+uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd)
+{
+ return sd_uhsimode;
+}
+
/* Check whether the card supports the driver_type corresponding to the
   driver_type in the preset value, which will be selected by the requested
   UHSI mode.
   input:
	clk mode: valid values: SD3CLKMODE_2_SDR50, SD3CLKMODE_3_SDR104,
	SD3CLKMODE_4_DDR50, SD3CLKMODE_AUTO
   outputs:
	return_val: TRUE; if a matching drvstrn for the given clkmode is
	found in both HC and card. otherwise, FALSE.
	[other outputs below valid ONLY if return_val is TRUE]
	drvstrn : driver strength read from CCCR.
	presetval: value of preset reg, corr to the clkmode.
 */
static bool
sdstd_3_get_matching_drvstrn(sdioh_info_t *sd, int sd3_requested_clkmode,
	uint32 *drvstrn, uint16 *presetval)
{
	int status;
	uint8 presetreg;
	/* single-bit mask of the driver type the preset value requires */
	uint8 cccr_reqd_dtype_mask = 1;

	sd3_trace(("sd3: %s:\n", __FUNCTION__));

	if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
		/* CARD: get the card driver strength from cccr */
		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
			1, drvstrn)) != BCME_OK) {
			sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card"
				"Failed!\n", __FUNCTION__));
			return FALSE;
		}
		if (TRUE != sd3_sw_override1) {
			/* HOSTC: get the addr of preset register indexed by the clkmode */
			presetreg = SD3_PresetValStart +
				(2*sd3_requested_clkmode + 6);
			*presetval = sdstd_rreg16(sd, presetreg);
		} else {
			/* Note: +3 for mapping between SD3CLKMODE_xxx and presetval_sw_table */
			*presetval = presetval_sw_table[sd3_requested_clkmode + 3];
		}
		sd_err(("%s:reqCLK: %d, presetval: 0x%x\n",
			__FUNCTION__, sd3_requested_clkmode, *presetval));

		/* shift the mask up to the driver type selected by the preset */
		cccr_reqd_dtype_mask <<= GFIELD(*presetval, PRESET_DRIVR_SELECT);

		/* compare/match: card must advertise that driver type in its
		 * CCCR capability field
		 */
		if (!(cccr_reqd_dtype_mask & GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP))) {
			sd_err(("%s:cccr_reqd_dtype_mask and SDIO_BUS_DRVR_TYPE_CAP"
				"not matching!:reqd:0x%x, cap:0x%x\n", __FUNCTION__,
				cccr_reqd_dtype_mask, GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP)));
			return FALSE;
		} else {
			/* check if drive strength override is required. If so, first setit */
			if (*dhd_sdiod_uhsi_ds_override != DRVSTRN_IGNORE_CHAR) {
				int ds_offset = 0;
				uint32 temp = 0;

				/* drvstrn to reflect the preset val: this is default */
				*drvstrn = GFIELD(*presetval, PRESET_DRIVR_SELECT);

				/* now check override: the override char maps into
				 * DTS_vals[] by its distance from DRVSTRN_MAX_CHAR
				 */
				ds_offset = (((int)DRVSTRN_MAX_CHAR -
					(int)(*dhd_sdiod_uhsi_ds_override)));
				if ((ds_offset >= 0) && (ds_offset <= MAX_DTS_INDEX)) {
					ds_offset = MAX_DTS_INDEX - ds_offset;
					sd_err(("%s:Drive strength override: %c, offset: "
						"%d, val: %d\n", __FUNCTION__,
						*dhd_sdiod_uhsi_ds_override,
						ds_offset, DTS_vals[ds_offset]));
					temp = SFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_SEL,
						DTS_vals[ds_offset]);
					sd_err(("%s:DrvStrn orig: 0x%x, modif: 0x%x\n",
						__FUNCTION__, *drvstrn, temp));
					*drvstrn = temp;
				} else {
					/* else case is default: use preset val */
					sd_err(("%s:override invalid: DrvStrn is from "
						"preset: 0x%x\n",
						__FUNCTION__, *drvstrn));
				}
			} else {
				sd_err(("%s:DrvStrn is from preset: 0x%x\n",
					__FUNCTION__, *drvstrn));
			}
		}
	} else {
		/* TBD check for sd3_requested_clkmode : -1 also. */
		sd_err(("%s: Automode not supported!\n", __FUNCTION__));
		return FALSE;
	}
	return TRUE;
}
+
/* Returns a matching UHSI clk speed if one is found. If not, returns -1.
   Also, if sd3_requested_clkmode is -1, finds the closest max match clk and returns.
   Side effect: updates sd->global_UHSI_Supp to reflect the validated mode.
 */
static int
sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd, int sd3_requested_clkmode)
{
	uint32 card_val_uhsisupp;
	uint8 speedmask = 1;
	uint32 drvstrn;
	uint16 presetval;
	int status;

	sd3_trace(("sd3: %s:\n", __FUNCTION__));

	/* pessimistic default until the requested mode is validated below */
	sd->global_UHSI_Supp = HOST_SDR_UNSUPP;

	/* for legacy/25MHz/50MHz bus speeds, no checks done here */
	if ((sd3_requested_clkmode == SD3CLKMODE_0_SDR12) ||
		(sd3_requested_clkmode == SD3CLKMODE_1_SDR25)) {
		sd->global_UHSI_Supp = HOST_SDR_12_25;
		return sd3_requested_clkmode;
	}
	/* get cap of card */
	if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_UHSI_SUPPORT,
		1, &card_val_uhsisupp)) != BCME_OK) {
		sd_err(("%s:SDIOD_CCCR_UHSI_SUPPORT query failed!\n", __FUNCTION__));
		return -1;
	}
	sd_info(("%s:Read %08x from Card at %08x\n", __FUNCTION__,
		card_val_uhsisupp, SDIOD_CCCR_UHSI_SUPPORT));

	if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
		/* Note: it is assumed that, following are executed when (sd3ClkMode >= 2) */
		/* single-bit mask for the requested mode, relative to SDR50 */
		speedmask <<= (sd3_requested_clkmode - SD3CLKMODE_2_SDR50);

		/* check first about 3.0 HS CLK modes */
		if (!(GFIELD(sd->caps3, CAP3_30CLKCAP) & speedmask)) {
			sd_err(("%s:HC does not support req 3.0 UHSI mode."
				"requested:%d; capable:0x%x\n", __FUNCTION__,
				sd3_requested_clkmode, GFIELD(sd->caps3, CAP3_30CLKCAP)));
			return -1;
		}

		/* check first about 3.0 CARD CLK modes */
		if (!(GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP) & speedmask)) {
			sd_err(("%s:Card does not support req 3.0 UHSI mode. requested:%d;"
				"capable:0x%x\n", __FUNCTION__, sd3_requested_clkmode,
				GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP)));
			return -1;
		}

		/* check, to see if the card supports driver_type corr to the
		   driver_type in preset value, which will be selected by
		   requested UHSI mode
		 */
		if (!sdstd_3_get_matching_drvstrn(sd, sd3_requested_clkmode,
			&drvstrn, &presetval)) {
			sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset"
				"val:0x%x\n", __FUNCTION__, drvstrn, presetval));
			return -1;
		}
		/* success path. change the support variable accordingly */
		sd->global_UHSI_Supp = HOST_SDR_50_104_DDR;
		return sd3_requested_clkmode;
	} else {
		/* auto clk selection: get the highest clock capable by both card and HC */
/* TBD TOBE DONE */
/* sd->global_UHSI_Supp = TRUE; on success */
		return -1;
	}
}
+
/* Perform the SD3.0 signal-voltage switch sequence to 1.8V:
 * CMD11 -> gate SD clock -> check DAT[3:0] low -> assert 1.8V enable in
 * HostCntrl2 -> re-enable clock -> check DAT[3:0] high.
 * Returns SUCCESS, or ERROR / the failing command status.
 */
static int
sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd)
{
	int status;
	uint32 cmd_rsp = 0, presst;
	uint16 val1 = 0;

	sd3_trace(("sd3: %s:\n", __FUNCTION__));

	/* Issue cmd11 */
	if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_11, 0))
		!= SUCCESS) {
		sd_err(("%s: CMD11 failed\n", __FUNCTION__));
		return status;
	}

	/* check response: any of these R1 error bits aborts the switch */
	sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
	if (
		GFIELD(cmd_rsp, RSP1_ERROR) ||	/* bit 19 */
		GFIELD(cmd_rsp, RSP1_ILLEGAL_CMD) ||	/* bit 22 */
		GFIELD(cmd_rsp, RSP1_COM_CRC_ERROR) ||	/* bit 23 */
		GFIELD(cmd_rsp, RSP1_CARD_LOCKED)	/* bit 25 */ ) {
		sd_err(("%s: FAIL:CMD11: cmd_resp:0x%x\n", __FUNCTION__, cmd_rsp));
		return ERROR;
	}

	/* SD Clock Enable = 0 */
	sdstd_wreg16(sd, SD_ClockCntrl,
		sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));

	/* check DAT[3..0] using Present State Reg. If not 0, error */
	presst = sdstd_rreg(sd, SD_PresentState);
	if (0 != GFIELD(presst, PRES_DAT_SIGNAL)) {
		sd_err(("%s: FAIL: PRESTT:0x%x\n", __FUNCTION__, presst));
		return ERROR;
	}

	/* turn 1.8V sig enable in HC2 */
	val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
	val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
	sdstd_wreg16(sd, SD3_HostCntrl2, val1);

	/* wait 5ms for the regulator to settle */
	OSL_DELAY(5000);

	/* check 1.8V sig enable in HC2. if cleared, error (controller
	 * rejected the switch)
	 */
	val1 = sdstd_rreg16(sd, SD3_HostCntrl2);

	if (!GFIELD(val1, HOSTCtrl2_1_8SIG_EN)) {
		sd_err(("%s: FAIL: HC2:1.8V_En:0x%x\n", __FUNCTION__, val1));
		return ERROR;
	}

	/* SD Clock Enable = 1 */
	val1 = sdstd_rreg16(sd, SD_ClockCntrl);
	sdstd_wreg16(sd, SD_ClockCntrl, val1 | 0x4);

	/* wait 1ms */
	OSL_DELAY(1000);

	/* check DAT[3..0] using Present State Reg. If not 0b1111, error:
	 * the card must drive all DAT lines high after a successful switch
	 */
	presst = sdstd_rreg(sd, SD_PresentState);
	if (0xf != GFIELD(presst, PRES_DAT_SIGNAL)) {
		sd_err(("%s: FAIL: PRESTT_FINAL:0x%x\n", __FUNCTION__, presst));
		return ERROR;
	}

	return (SUCCESS);
}
+
/* Enable or disable SDIO high-speed clocking on both the card (CCCR speed
 * control register) and the host controller (HOST_HI_SPEED_EN bit).
 * HSMode TRUE : enable if both HC (CAP_HIGHSPEED) and card (SDIO_SPEED_SHS)
 *               support it;
 * HSMode FALSE: clear the card's EHS bit and the HC's hi-speed bit.
 * When the card is running UHSI voltage, also resets the card's driver
 * strength selection back to its default.
 * Returns BCME_OK or a BCME_* error code.
 */
static int
sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
{
	uint32 regdata;
	int status;
	uint8 reg8;

	uint32 drvstrn;

	reg8 = sdstd_rreg8(sd, SD_HostCntrl);

#ifdef BCMINTERNAL
	/* The Jinvani SD Gold Host forces the highest clock rate in high-speed mode */
	/* Only enable high-speed mode if the SD clock divisor is 1. */
	if (sd->controller_type == SDIOH_TYPE_JINVANI_GOLD) {
		if (sd_divisor != 1) {
			HSMode = FALSE;
		}
	}
#endif /* BCMINTERNAL */

	if (HSMode == TRUE) {
		/* hi-speed requested but HC lacks the capability: hard error */
		if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
			sd_err(("Host Controller does not support hi-speed mode.\n"));
			return BCME_ERROR;
		}

		sd_info(("Attempting to enable High-Speed mode.\n"));

		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
			1, &regdata)) != SUCCESS) {
			return BCME_SDIO_ERROR;
		}
		if (regdata & SDIO_SPEED_SHS) {
			sd_info(("Device supports High-Speed mode.\n"));

			/* set Enable High Speed in the card's CCCR */
			regdata |= SDIO_SPEED_EHS;

			sd_info(("Writing %08x to Card at %08x\n",
				regdata, SDIOD_CCCR_SPEED_CONTROL));
			if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
				1, regdata)) != BCME_OK) {
				return BCME_SDIO_ERROR;
			}

			/* read back to confirm the write took effect */
			if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
				1, &regdata)) != BCME_OK) {
				return BCME_SDIO_ERROR;
			}

			sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));

			reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);

			sd_err(("High-speed clocking mode enabled.\n"));
		}
		else {
			sd_err(("Device does not support High-Speed Mode.\n"));
			reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
		}
	} else {
		/* Force off device bit */
		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
			1, &regdata)) != BCME_OK) {
			return status;
		}
		if (regdata & SDIO_SPEED_EHS) {
			regdata &= ~SDIO_SPEED_EHS;
			if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
				1, regdata)) != BCME_OK) {
				return status;
			}
		}

		sd_err(("High-speed clocking mode disabled.\n"));
		reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
	}

	if ((sd->host_UHSISupported) && (sd->card_UHSI_voltage_Supported)) {
		/* also set the default driver strength in the card/HC [this is reqd because,
		   if earlier we selected any other drv_strn, we need to reset it]
		 */
		/* get the card driver strength from cccr */
		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
			1, &drvstrn)) != BCME_OK) {
			sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card"
				"Failed!\n", __FUNCTION__));
			return BCME_SDIO_ERROR;
		}

		/* reset card drv strn */
		drvstrn = SFIELD(drvstrn, SDIO_BUS_DRVR_TYPE_SEL, 0);

		/* set card drv strn */
		if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
			1, drvstrn)) != BCME_OK) {
			sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in"
				"card Failed!\n", __FUNCTION__));
			return BCME_SDIO_ERROR;
		}
	}

	/* commit the accumulated HOST_HI_SPEED_EN setting */
	sdstd_wreg8(sd, SD_HostCntrl, reg8);

	return BCME_OK;
}
+
/* Select DMA Mode:
 *  If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
 *  Otherwise, pick the selected mode if supported.
 *  If not supported, use PIO mode.
 *
 * NOTE(review): the 'dma_mode' argument is never read; the selection below
 * switches on the previously stored sd->sd_dma_mode — confirm that callers
 * store the requested mode into sd->sd_dma_mode before calling.
 */
static int
sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
{
	/* dma_sel_bits defaults to SDMA; only ADMA cases override it */
	uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
	int8 prev_dma_mode = sd->sd_dma_mode;

	switch (prev_dma_mode) {
		case DMA_MODE_AUTO:
			/* prefer ADMA2 > ADMA1 > SDMA > none, by capability */
			sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
				__FUNCTION__));
			if (GFIELD(sd->caps, CAP_ADMA2)) {
				sd->sd_dma_mode = DMA_MODE_ADMA2;
				dma_sel_bits = SDIOH_ADMA2_MODE;
			} else if (GFIELD(sd->caps, CAP_ADMA1)) {
				sd->sd_dma_mode = DMA_MODE_ADMA1;
				dma_sel_bits = SDIOH_ADMA1_MODE;
			} else if (GFIELD(sd->caps, CAP_DMA)) {
				sd->sd_dma_mode = DMA_MODE_SDMA;
			} else {
				sd->sd_dma_mode = DMA_MODE_NONE;
			}
			break;
		case DMA_MODE_NONE:
			sd->sd_dma_mode = DMA_MODE_NONE;
			break;
		case DMA_MODE_SDMA:
			if (GFIELD(sd->caps, CAP_DMA)) {
				sd->sd_dma_mode = DMA_MODE_SDMA;
			} else {
				sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
				sd->sd_dma_mode = DMA_MODE_NONE;
			}
			break;
		case DMA_MODE_ADMA1:
			if (GFIELD(sd->caps, CAP_ADMA1)) {
				sd->sd_dma_mode = DMA_MODE_ADMA1;
				dma_sel_bits = SDIOH_ADMA1_MODE;
			} else {
				sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
				sd->sd_dma_mode = DMA_MODE_NONE;
			}
			break;
		case DMA_MODE_ADMA2:
			if (GFIELD(sd->caps, CAP_ADMA2)) {
				sd->sd_dma_mode = DMA_MODE_ADMA2;
				dma_sel_bits = SDIOH_ADMA2_MODE;
			} else {
				sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
				sd->sd_dma_mode = DMA_MODE_NONE;
			}
			break;
		case DMA_MODE_ADMA2_64:
			sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
			sd->sd_dma_mode = DMA_MODE_NONE;
			break;
		default:
			sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__,
				prev_dma_mode));
			sd->sd_dma_mode = DMA_MODE_NONE;
			break;
	}

	/* clear SysAddr, only used for SDMA */
	sdstd_wreg(sd, SD_SysAddr, 0);

	sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode]));

	/* program the DMA-select field in the Host Control register */
	reg8 = sdstd_rreg8(sd, SD_HostCntrl);
	reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
	sdstd_wreg8(sd, SD_HostCntrl, reg8);
	sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));

	return BCME_OK;
}
+
+#ifdef BCMDBG
+void
+print_regs(sdioh_info_t *sd)
+{
+ uint8 reg8 = 0;
+ uint16 reg16 = 0;
+ uint32 reg32 = 0;
+ uint8 presetreg;
+ int i;
+
+ reg8 = sdstd_rreg8(sd, SD_BlockSize);
+ printf("REGS: SD_BlockSize [004h]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_BlockCount);
+ printf("REGS: SD_BlockCount [006h]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_BlockSize);
+ printf("REGS: SD_BlockSize [004h]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_TransferMode);
+ printf("REGS: SD_TransferMode [00Ch]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+ printf("REGS: SD_HostCntrl [028h]:0x%x\n", reg8);
+
+ reg32 = sdstd_rreg(sd, SD_PresentState);
+ printf("REGS: SD_PresentState [024h]:0x%x\n", reg32);
+
+ reg8 = sdstd_rreg8(sd, SD_PwrCntrl);
+ printf("REGS: SD_PwrCntrl [029h]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_BlockGapCntrl);
+ printf("REGS: SD_BlockGapCntrl [02Ah]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_WakeupCntrl);
+ printf("REGS: SD_WakeupCntrl [02Bh]:0x%x\n", reg8);
+
+ reg16 = sdstd_rreg16(sd, SD_ClockCntrl);
+ printf("REGS: SD_ClockCntrl [02Ch]:0x%x\n", reg16);
+
+ reg8 = sdstd_rreg8(sd, SD_TimeoutCntrl);
+ printf("REGS: SD_TimeoutCntrl [02Eh]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_SoftwareReset);
+ printf("REGS: SD_SoftwareReset [02Fh]:0x%x\n", reg8);
+
+ reg16 = sdstd_rreg16(sd, SD_IntrStatus);
+ printf("REGS: SD_IntrStatus [030h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_ErrorIntrStatus);
+ printf("REGS: SD_ErrorIntrStatus [032h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ printf("REGS: SD_IntrStatusEnable [034h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
+ printf("REGS: SD_ErrorIntrStatusEnable [036h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_IntrSignalEnable);
+ printf("REGS: SD_IntrSignalEnable [038h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_ErrorIntrSignalEnable);
+ printf("REGS: SD_ErrorIntrSignalEnable [03Ah]:0x%x\n", reg16);
+
+ reg32 = sdstd_rreg(sd, SD_Capabilities);
+ printf("REGS: SD_Capabilities [040h]:0x%x\n", reg32);
+
+ reg32 = sdstd_rreg(sd, SD_MaxCurCap);
+ printf("REGS: SD_MaxCurCap [04Ah]:0x%x\n", reg32);
+
+ reg32 = sdstd_rreg(sd, SD_Capabilities3);
+ printf("REGS: SD_Capabilities3 [044h]:0x%x\n", reg32);
+
+ reg16 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ printf("REGS: SD3_HostCntrl2 [03Eh]:0x%x\n", reg16);
+
+ for (i = 0; i < 8; i++) {
+ presetreg = SD3_PresetValStart + i*2;
+ printf("REGS: Presetvalreg:ix[%d]:0x%x, val=0x%x\n", i,
+ presetreg, sdstd_rreg16(sd, presetreg));
+ }
+
+ reg16 = sdstd_rreg16(sd, SD_SlotInterruptStatus);
+ printf("REGS: SD_SlotInterruptStatus [0FCh]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_HostControllerVersion);
+ printf("REGS: SD_HostControllerVersion [0FEh]:0x%x\n", reg16);
+}
+#endif /* BCMDBG */
+
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+static int
+parse_state(uint32 state, char *buf, int len)
+{
+ char *data = buf;
+
+ sd_err(("Parsing state 0x%x\n", state));
+ if (!len) {
+ return (0);
+ }
+
+ data += sprintf(data, "cmd_inhibit %d\n", GFIELD(state, PRES_CMD_INHIBIT));
+ data += sprintf(data, "dat_inhibit %d\n", GFIELD(state, PRES_DAT_INHIBIT));
+ data += sprintf(data, "dat_busy %d\n", GFIELD(state, PRES_DAT_BUSY));
+ data += sprintf(data, "write_active %d\n", GFIELD(state, PRES_WRITE_ACTIVE));
+ data += sprintf(data, "read_active %d\n", GFIELD(state, PRES_READ_ACTIVE));
+ data += sprintf(data, "write_data_rdy %d\n", GFIELD(state, PRES_WRITE_DATA_RDY));
+ data += sprintf(data, "read_data_rdy %d\n", GFIELD(state, PRES_READ_DATA_RDY));
+ data += sprintf(data, "card_present %d\n", GFIELD(state, PRES_CARD_PRESENT));
+ data += sprintf(data, "card_stable %d\n", GFIELD(state, PRES_CARD_STABLE));
+ data += sprintf(data, "card_present_raw %d\n", GFIELD(state, PRES_CARD_PRESENT_RAW));
+ data += sprintf(data, "write_enabled %d\n", GFIELD(state, PRES_WRITE_ENABLED));
+ data += sprintf(data, "cmd_signal %d\n", GFIELD(state, PRES_CMD_SIGNAL));
+
+ return (data - buf);
+}
+
+static int
+parse_caps(uint32 cap, char *buf, int len)
+{
+ int block = 0xbeef;
+ char *data = buf;
+
+ data += sprintf(data, "TimeOut Clock Freq:\t%d\n", GFIELD(cap, CAP_TO_CLKFREQ));
+ data += sprintf(data, "TimeOut Clock Unit:\t%d\n", GFIELD(cap, CAP_TO_CLKUNIT));
+ data += sprintf(data, "Base Clock:\t\t%d\n", GFIELD(cap, CAP_BASECLK));
+ switch (GFIELD(cap, CAP_MAXBLOCK)) {
+ case 0: block = 512; break;
+ case 1: block = 1024; break;
+ case 2: block = 2048; break;
+ case 3: block = 0; break;
+ }
+ data += sprintf(data, "Max Block Size:\t\t%d\n", block);
+ data += sprintf(data, "Support High Speed:\t%d\n", GFIELD(cap, CAP_HIGHSPEED));
+ data += sprintf(data, "Support DMA:\t\t%d\n", GFIELD(cap, CAP_DMA));
+ data += sprintf(data, "Support Suspend:\t%d\n", GFIELD(cap, CAP_SUSPEND));
+ data += sprintf(data, "Support 3.3 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_3_3));
+ data += sprintf(data, "Support 3.0 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_3_0));
+ data += sprintf(data, "Support 1.8 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_1_8));
+ return (data - buf);
+}
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
/* XXX Per SDIO Host Controller Spec section 3.2.1
   Note: for 2.x HC, new_sd_divisor should be a power of 2, but for 3.0
   HC, new_sd_divisor should be a multiple of 2.

   Programs the clock divisor, waits for the internal clock to stabilize,
   enables the SD clock, and adjusts the data-timeout control to match the
   divisor. Returns TRUE on success, FALSE on invalid divisor / unstable
   clock / unknown base clock frequency.

   NOTE(review): the divisor-validity checks are compiled only under BCMDBG;
   in non-debug builds new_sd_divisor == 0 would reach the `% new_sd_divisor`
   expressions below and divide by zero — confirm callers never pass 0.
*/
bool
sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
{
	uint rc, count;
	uint16 divisor;
	uint16 regdata;
	uint16 val1;

	sd3_trace(("%s: starting clk\n", __FUNCTION__));
	/* turn off HC clock */
	sdstd_wreg16(sd, SD_ClockCntrl,
		sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */

	/* Set divisor */
	if (sd->host_UHSISupported) {
#ifdef BCMDBG
		/* 3.0 HC: divisor must be 1 or an even value <= 0x3ff */
		if ((new_sd_divisor != 1) &&	/* 1 is a valid value */
			((new_sd_divisor & (0x1)) ||	/* check for multiple of 2 */
			(new_sd_divisor == 0) ||
			(new_sd_divisor > 0x3ff))) {
			sd_err(("3.0: Invalid clock divisor target: %d\n", new_sd_divisor));
			return FALSE;
		}
#endif
		/* 3.0 HC encodes N in "divide by 2N" form */
		divisor = (new_sd_divisor >> 1);
	} else
	{
#ifdef BCMDBG
		/* 2.x HC: divisor must be a non-zero power of 2 */
		if ((new_sd_divisor & (new_sd_divisor-1)) ||
			(new_sd_divisor == 0)) {
			sd_err(("Invalid clock divisor target: %d\n", new_sd_divisor));
			return FALSE;
		}
#endif
		/* new logic: if divisor > 256, restrict to 256 */
		if (new_sd_divisor > 256)
			new_sd_divisor = 256;
		/* 2.x HC: one-hot divisor select in bits 15:8 */
		divisor = (new_sd_divisor >> 1) << 8;
	}
#ifdef BCMINTERNAL
	if (sd->controller_type == SDIOH_TYPE_JINVANI_GOLD) {
		divisor = (new_sd_divisor >> 2) << 8;
	}
#endif /* BCMINTERNAL */

	sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
	if (sd->host_UHSISupported) {
		/* *get preset value and shift so that.
		 * bits 0-7 are in 15-8 and 9-8 are in 7-6 of clkctrl
		 */
		val1 = divisor << 2;
		val1 &= 0x0ffc;
		val1 |= divisor >> 8;
		val1 <<= 6;
		/* NOTE(review): stray printf left enabled in the hot path —
		 * consider demoting to sd_info.
		 */
		printf("divisor:%x;val1:%x\n", divisor, val1);
		sdstd_mod_reg16(sd, SD_ClockCntrl, 0xffC0, val1);
	} else
	{
		sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
	}

	sd_err(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
		new_sd_divisor, divisor));
	if (new_sd_divisor > 0)
		sd_err(("%s:now, divided clk is: %d Hz\n",
			__FUNCTION__, GFIELD(sd->caps, CAP_BASECLK)*1000000/new_sd_divisor));
	else
		sd_err(("Using Primary Clock Freq of %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
	sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
	/* report the resulting SDIO clock for the base frequencies this
	 * driver knows about; unknown base clocks are a hard error
	 */
	if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
			((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
			((50 % new_sd_divisor) ? "KHz" : "MHz")));
	} else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
			((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
			((48 % new_sd_divisor) ? "KHz" : "MHz")));
	} else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
			((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
			((33 % new_sd_divisor) ? "KHz" : "MHz")));
	} else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 31) {
		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
			((31 % new_sd_divisor) ? (31000 / new_sd_divisor) : (31 / new_sd_divisor)),
			((31 % new_sd_divisor) ? "KHz" : "MHz")));
	} else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 8) {
		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
			((8 % new_sd_divisor) ? (8000 / new_sd_divisor) : (8 / new_sd_divisor)),
			((8 % new_sd_divisor) ? "KHz" : "MHz")));
	} else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
		/* XXX - BCM 27XX Standard Host Controller returns 0 for CLKFREQ */
	} else {
		sd_err(("Need to determine divisor for %d MHz clocks\n",
			GFIELD(sd->caps, CAP_BASECLK)));
		sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
		return (FALSE);
	}

	sdstd_or_reg16(sd, SD_ClockCntrl, 0x1);	/* Enable the clock */

	/* Wait for clock to stabilize (internal clock stable bit, 0x2) */
	rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
	count = 0;
	while (!rc) {
		OSL_DELAY(1);
		sd_info(("Waiting for clock to become stable 0x%x\n", rc));
		rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
		count++;
		if (count > 10000) {
			sd_err(("%s:Clocks failed to stabilize after %u attempts\n",
				__FUNCTION__, count));
			return (FALSE);
		}
	}
	/* Turn on clock */
	sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);

	OSL_DELAY(20);

	/* Set timeout control (adjust default value based on divisor).
	 * Disabling timeout interrupts during setting is advised by host spec.
	 */
#ifdef BCMQT
	if (GFIELD(sd->caps, CAP_BASECLK) < 50)
#endif
	{
		uint toval;

		/* shrink the timeout exponent by one for each factor of 2
		 * in the divisor
		 */
		toval = sd_toctl;
		divisor = new_sd_divisor;

		while (toval && !(divisor & 1)) {
			toval -= 1;
			divisor >>= 1;
		}

		regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
		sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
		sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
		sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
	}
#ifdef BCMQT
	else {
		sd_info(("%s: REsetting err int control\n", __FUNCTION__));
		/* XXX: turn off timeout INT, it resets clk ctrl bit */
		regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
		sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
	}
#endif
	OSL_DELAY(2);

	sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));

	return TRUE;
}
+
+/* XXX Per SDIO Host Controller Spec 3.3
+ * volts_req:
+ * 0 means default: select highest voltage.
+ * 1 means 1.8V
+ * 2 means 3.0V
+ * 3 means 3.3V
+ * returns
+ * TRUE: no error
+ * FALSE: general error
+ * SDIO_OCR_READ_FAIL: ocr reading failure. Now the HC has to try in other available voltages.
+*/
+uint16
+sdstd_start_power(sdioh_info_t *sd, int volts_req)
+{
+ char *s;
+ uint32 cmd_arg;
+ uint32 cmd_rsp;
+ uint8 pwr = 0;
+ int volts = 0;
+ uint16 val1;
+ uint16 init_divider = 0;
+ uint8 baseclk = 0;
+ bool selhighest = (volts_req == 0) ? TRUE : FALSE;
+
+ /* reset the card uhsi volt support to false */
+ sd->card_UHSI_voltage_Supported = FALSE;
+
+ /* Ensure a power on reset by turning off bus power in case it happened to
+ * be on already. (This might happen if driver doesn't unload/clean up correctly,
+ * crash, etc.) Leave off for 100ms to make sure the power off isn't
+ * ignored/filtered by the device. Note we can't skip this step if the power is
+ * off already since we don't know how long it has been off before starting
+ * the driver.
+ */
+ sdstd_wreg8(sd, SD_PwrCntrl, 0);
+ sd_info(("Turning off VDD/bus power briefly (100ms) to ensure reset\n"));
+ OSL_DELAY(100000);
+
+ /* For selecting highest available voltage, start from lowest and iterate */
+ if (!volts_req)
+ volts_req = 1;
+
+ s = NULL;
+
+ if (volts_req == 1) {
+ if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
+ volts = 5;
+ s = "1.8";
+ if (FALSE == selhighest)
+ goto voltsel;
+ else
+ volts_req++;
+ } else {
+ sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts));
+ volts_req++;
+ }
+ }
+
+ if (volts_req == 2) {
+ if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
+ volts = 6;
+ s = "3.0";
+ if (FALSE == selhighest)
+ goto voltsel;
+ else volts_req++;
+ } else {
+ sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts));
+ volts_req++;
+ }
+ }
+
+ if (volts_req == 3) {
+ if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
+ volts = 7;
+ s = "3.3";
+ } else {
+ if ((FALSE == selhighest) || (volts == 0)) {
+ sd_err(("HC doesn't support any voltage! error!\n"));
+ return FALSE;
+ }
+ }
+ }
+
+ /* XXX
+ * if UHSI is NOT supported, check for other voltages also. This is a safety measure
+ * for embedded devices also, so that HC starts in lower power first. If this
+ * function fails, the caller may disable UHSISupported
+ * and call start power again to check support in higher voltages.
+ */
+
+voltsel:
+ pwr = SFIELD(pwr, PWR_VOLTS, volts);
+ pwr = SFIELD(pwr, PWR_BUS_EN, 1);
+ sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
+ sd_info(("Setting Bus Power to %s Volts\n", s));
+ BCM_REFERENCE(s);
+
+ /*
+ * PR101766 : BRCM SDIO3.0 card is an embedded SD device. It is not a SD card.
+ * VDDIO signalling will be tied to 1.8v level on all SDIO3.0 based boards.
+ * So program the HC to drive VDDIO at 1.8v level.
+ */
+ if ((sd->version == HOST_CONTR_VER_3) && (volts == 5)) {
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ }
+
+ /* Wait for 500ms for power to stabilize. Some designs have reset IC's
+ * which can hold reset low for close to 300ms. In addition there can
+ * be ramp time for VDD and/or VDDIO which might be provided from a LDO.
+ * For these reasons we need a pretty conservative delay here to have
+ * predictable reset behavior in the face of an unknown design.
+ */
+ OSL_DELAY(500000);
+
+ baseclk = GFIELD(sd->caps, CAP_BASECLK);
+ sd_info(("%s:baseclk: %d MHz\n", __FUNCTION__, baseclk));
+ /* for 3.0, find divisor */
+ if (sd->host_UHSISupported) {
+ /* ToDo : Dynamic modification of preset value table based on base clk */
+ sd3_trace(("sd3: %s: checking divisor\n", __FUNCTION__));
+ if (GFIELD(sd->caps3, CAP3_CLK_MULT) != 0) {
+ sd_err(("%s:Possible error: CLK Mul 1 CLOCKING NOT supported!\n",
+ __FUNCTION__));
+ return FALSE;
+ } else {
+ /* calculate dividor, which leads to 400KHz. */
+ init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
+ /* make it a multiple of 2. */
+ init_divider += (init_divider & 0x1);
+ sd_err(("%s:divider used for init:%d\n",
+ __FUNCTION__, init_divider));
+ }
+ } else {
+ /* Note: sd_divisor assumes that SDIO Base CLK is 50MHz. */
+ int final_freq_based_on_div = 50/sd_divisor;
+ if (baseclk > 50)
+ sd_divisor = baseclk/final_freq_based_on_div;
+ /* TBD: merge both SDIO 2.0 and 3.0 to share same divider logic */
+ init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
+ /* find next power of 2 */
+ NEXT_POW2(init_divider);
+ sd_err(("%s:NONUHSI: divider used for init:%d\n",
+ __FUNCTION__, init_divider));
+ }
+
+ /* Start at ~400KHz clock rate for initialization */
+ if (!sdstd_start_clock(sd, init_divider)) {
+ sd_err(("%s: sdstd_start_clock failed\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ /* Get the Card's Operation Condition. Occasionally the board
+ * takes a while to become ready
+ */
+ cmd_arg = 0;
+ cmd_rsp = 0;
+ if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
+ sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__));
+ /* No need to reset as not sure in what state the card is. */
+ return SDIO_OCR_READ_FAIL;
+ }
+
+ sd_info(("cmd_rsp = 0x%x\n", cmd_rsp));
+ sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
+ sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
+ sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
+ sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+ /* Verify that the card supports I/O mode */
+ if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
+ sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
+ return ERROR;
+ }
+ sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
+
+ /* Examine voltage: Arasan only supports 3.3 volts,
+ * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
+ */
+
+ /* XXX Pg 10 SDIO spec v1.10 */
+ if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
+ sd_err(("This client does not support 3.3 volts!\n"));
+ return ERROR;
+ }
+ sd_info(("Leaving bus power at 3.3 Volts\n"));
+
+ cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
+ /* if HC uhsi supported and card voltage set is 3.3V then switch to 1.8V */
+ if ((sd->host_UHSISupported) && (volts == 5)) {
+ /* set S18R also */
+ cmd_arg = SFIELD(cmd_arg, CMD5_S18R, 1);
+ }
+ cmd_rsp = 0;
+ get_ocr(sd, &cmd_arg, &cmd_rsp);
+ sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+ if ((sd->host_UHSISupported)) {
+ /* card responded with s18A => card supports sdio3.0,do tuning proc */
+ if (GFIELD(cmd_rsp, RSP4_S18A) == 1) {
+ if (sdstd_3_sigvoltswitch_proc(sd)) {
+ /* continue with legacy way of working */
+ sd_err(("%s: voltage switch not done. error, stopping\n",
+ __FUNCTION__));
+ /* How to gracefully proceced here? */
+ return FALSE;
+ } else {
+ sd->card_UHSI_voltage_Supported = TRUE;
+ sd_err(("%s: voltage switch SUCCESS!\n", __FUNCTION__));
+ }
+ } else {
+ /* This could happen for 2 cases.
+ * 1) means card is NOT sdio3.0 . Note that
+ * card_UHSI_voltage_Supported is already false.
+ * 2) card is sdio3.0 but it is already in 1.8V.
+ * But now, how to change host controller's voltage?
+ * In this case we need to do the following.
+ * sd->card_UHSI_voltage_Supported = TRUE;
+ * turn 1.8V sig enable in HC2
+ * val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ * val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
+ * sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ */
+ sd_info(("%s: Not sdio3.0: host_UHSISupported: %d; HC volts=%d\n",
+ __FUNCTION__, sd->host_UHSISupported, volts));
+ }
+ } else {
+ sd_info(("%s: Legacy [non sdio3.0] HC\n", __FUNCTION__));
+ }
+
+ return TRUE;
+}
+
+/*
+ * sdstd_bus_width: program the SD bus data width on both sides of the link:
+ * the client card (CCCR Bus Interface Control, reg 0x7) and the standard
+ * host controller (SD_HostCntrl).  On UHS-I capable hosts it additionally
+ * enables asynchronous interrupts when both the host (CAP_ASYNCINT_SUP)
+ * and the card (SDIO_BUS_ASYNCINT_CAP) advertise support.
+ *
+ * sd       - host controller / client state
+ * new_mode - SDIOH_MODE_SD1 or SDIOH_MODE_SD4 (SPI is rejected with an error
+ *            message; the client register is still written unchanged)
+ *
+ * Returns TRUE on success.  NOTE(review): on a CCCR access failure this
+ * returns (bool)status, which maps any nonzero error code to TRUE; a caller
+ * that treats TRUE as success will not observe the failure -- confirm intent.
+ *
+ * Fix: '&regdata' in the two sdstd_card_regread() calls had been
+ * mojibake-corrupted ('&reg' collapsed into a U+00AE character), which
+ * does not compile; restored the address-of expressions.
+ */
+bool
+sdstd_bus_width(sdioh_info_t *sd, int new_mode)
+{
+ uint32 regdata;
+ int status;
+ uint8 reg8;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ if (sd->sd_mode == new_mode) {
+ sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
+ /* Could exit, but continue just in case... */
+ }
+
+ /* Set client side via reg 0x7 in CCCR */
+ if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
+ return (bool)status;
+ regdata &= ~BUS_SD_DATA_WIDTH_MASK;
+ if (new_mode == SDIOH_MODE_SD4) {
+ sd_info(("Changing to SD4 Mode\n"));
+ regdata |= SD4_MODE;
+ } else if (new_mode == SDIOH_MODE_SD1) {
+ sd_info(("Changing to SD1 Mode\n"));
+ } else {
+ sd_err(("SPI Mode not supported by Standard Host Controller\n"));
+ }
+
+ if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
+ return (bool)status;
+
+ if (sd->host_UHSISupported) {
+ uint32 card_asyncint = 0;
+ uint16 host_asyncint = 0;
+
+ if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_INTR_EXTN, 1,
+ &card_asyncint)) != SUCCESS) {
+ sd_err(("%s:INTR EXT getting failed!, ignoring\n", __FUNCTION__));
+ } else {
+ host_asyncint = sdstd_rreg16(sd, SD3_HostCntrl2);
+
+ /* check if supported by host and card */
+ if ((regdata & SD4_MODE) &&
+ (GFIELD(card_asyncint, SDIO_BUS_ASYNCINT_CAP)) &&
+ (GFIELD(sd->caps, CAP_ASYNCINT_SUP))) {
+ /* set enable async int in card */
+ card_asyncint = SFIELD(card_asyncint, SDIO_BUS_ASYNCINT_SEL, 1);
+
+ if ((status = sdstd_card_regwrite (sd, 0,
+ SDIOD_CCCR_INTR_EXTN, 1, card_asyncint)) != SUCCESS)
+ sd_err(("%s:INTR EXT setting failed!, ignoring\n",
+ __FUNCTION__));
+ else {
+ /* set enable async int in host */
+ host_asyncint = SFIELD(host_asyncint,
+ HOSTCtrl2_ASYINT_EN, 1);
+ sdstd_wreg16(sd, SD3_HostCntrl2, host_asyncint);
+ }
+ } else {
+ sd_err(("%s:INTR EXT NOT supported by either host or"
+ "card!, ignoring\n", __FUNCTION__));
+ }
+ }
+ }
+
+ /* Set host side via Host reg */
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
+ if (new_mode == SDIOH_MODE_SD4)
+ reg8 |= SD4_MODE;
+ sdstd_wreg8(sd, SD_HostCntrl, reg8);
+
+ sd->sd_mode = new_mode;
+
+ return TRUE;
+}
+
+/*
+ * One-time bring-up of the standard host controller and its SDIO client.
+ * Sequence (order matters, each step touches hardware):
+ *   1. reset tuning bookkeeping and initialize the host controller;
+ *   2. on Ricoh R5C822 SDIO-3.0 parts, pulse the WL/BT reset line before
+ *      the client sees CMD5;
+ *   3. initialize the client card;
+ *   4. if the negotiated UHS-I mode requires tuning (SDR104, or SDR50 when
+ *      flagged) and no override is set, arm the retuning machinery.
+ * Returns SUCCESS or ERROR.
+ */
+static int
+sdstd_driver_init(sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ sd->sd3_tuning_reqd = FALSE;
+ sd->sd3_tuning_disable = FALSE;
+ if ((sdstd_host_init(sd)) != SUCCESS) {
+ return ERROR;
+ }
+
+ /* Give WL_reset before sending CMD5 to dongle for Revx SDIO3 HC's */
+ if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && (sd->version == HOST_CONTR_VER_3))
+ {
+ sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x8);
+ OSL_DELAY(sd_delay_value);
+ sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x0);
+ OSL_DELAY(500000);
+ }
+
+ if (sdstd_client_init(sd) != SUCCESS) {
+ return ERROR;
+ }
+
+ /* if the global cap matched and is SDR 104/50 [if 50 it is reqd] enable tuning. */
+ if ((TRUE != sd3_sw_override1) && SD3_TUNING_REQD(sd, sd_uhsimode)) {
+ sd->sd3_tuning_reqd = TRUE;
+
+ /* init OS structs for tuning */
+ sdstd_3_osinit_tuning(sd);
+
+ /* enable HC tuning interrupt OR timer based on tuning method */
+ if (GFIELD(sd->caps3, CAP3_RETUNING_MODES)) {
+ /* enable both RTReq and timer */
+ sd->intmask |= HC_INTR_RETUNING;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+#ifdef BCMSDYIELD
+ if (sd_forcerb)
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+#endif /* BCMSDYIELD */
+ }
+ }
+
+ return SUCCESS;
+}
+
+/*
+ * Read a 24-bit little-endian CIS pointer from three consecutive byte
+ * registers starting at 'regaddr' and return the valid lower 17 bits.
+ * Read failures are logged but not propagated; the corresponding byte is
+ * whatever the last successful read left in 'regdata'.
+ *
+ * Fixes: '&regdata' had been mojibake-corrupted to a U+00AE character
+ * (would not compile); 'scratch' is now zero-initialized because only
+ * 3 of its 4 bytes are written before it is read (the old code read an
+ * uninitialized byte, even though the 0x1FFFF mask discards it).
+ */
+static int
+sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ uint32 scratch = 0, regdata;
+ uint8 *ptr = (uint8 *)&scratch;
+ for (i = 0; i < 3; i++) {
+ if ((sdstd_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+ sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+ *ptr++ = (uint8) regdata;
+ regaddr++;
+ }
+ /* Only the lower 17-bits are valid */
+ scratch = ltoh32(scratch);
+ scratch &= 0x0001FFFF;
+ return (scratch);
+}
+
+/*
+ * Discover the card's CIS pointers and enable I/O function 1.
+ * Reads the common CIS pointer from the CCCR, then the per-function CIS
+ * pointer from each function's FBR (sd->num_funcs was set earlier from the
+ * CMD5 response), and finally sets the F1 enable bit in CCCR IOEN.
+ * Returns SUCCESS, or the failing register-write status.
+ */
+static int
+sdstd_card_enablefuncs(sdioh_info_t *sd)
+{
+ int status;
+ uint32 regdata;
+ uint32 fbraddr;
+ uint8 func;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ /* Get the Card's common CIS address */
+ sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+ sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+ __FUNCTION__, func, sd->func_cis_ptr[func]));
+ }
+
+ /* Enable function 1 on the card */
+ regdata = SDIO_FUNC_ENABLE_1;
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
+ return status;
+
+ return SUCCESS;
+}
+
+/* Read client card reg.
+ *
+ * For func 0 or single-byte reads, uses CMD52 and extracts the data byte
+ * from the R5 response.  Otherwise uses a 2- or 4-byte byte-mode CMD53;
+ * in polled mode this then spins on Buffer-Read-Ready, pulls the data from
+ * the buffer data port, and spins again on Transfer-Complete, checking the
+ * error path at each step.  Returns SUCCESS or an error status.
+ * NOTE(review): the non-polled CMD53 path returns without reading the data
+ * port here -- presumably completed elsewhere via interrupt; confirm.
+ */
+static int
+sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+
+#ifdef BCMDBG
+ if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: Entering: ErrorintrStatus 0x%x, intstat = 0x%x\n",
+ __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg16(sd, SD_IntrStatus)));
+ }
+#endif
+
+ cmd_arg = 0;
+
+ if ((func == 0) || (regsize == 1)) {
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
+
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
+ __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+ }
+
+ /* 0x10 = "I/O current state: CMD" -- anything else is unexpected */
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+ *data = GFIELD(rsp5, RSP5_DATA);
+
+ sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
+ } else {
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); /* XXX SDIO spec v 1.10, Sec 5.3 */
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+
+ sd->data_xfer_count = regsize;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+ if (sd->polled_mode) {
+ volatile uint16 int_reg;
+ int retries = RETRIES_LARGE;
+
+ /* Wait for Read Buffer to become ready */
+ do {
+ sdstd_os_yield(sd);
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
+
+ if (!retries) {
+ sd_err(("%s: Timeout on Buf_Read_Ready: "
+ "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+ return (ERROR);
+ }
+
+ /* Have Buffer Ready, so clear it and read the data */
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
+ if (regsize == 2)
+ *data = sdstd_rreg16(sd, SD_BufferDataPort0);
+ else
+ *data = sdstd_rreg(sd, SD_BufferDataPort0);
+
+ sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
+ /* Check Status.
+ * After the data is read, the Transfer Complete bit should be on
+ */
+ retries = RETRIES_LARGE;
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
+
+ /* Check for any errors from the data phase */
+ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+ return ERROR;
+
+ if (!retries) {
+ sd_err(("%s: Timeout on xfer complete: "
+ "intr 0x%04x err 0x%04x state 0x%08x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ return (ERROR);
+ }
+
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
+ }
+ }
+ /* 16-bit reads come back through a 16-bit port read above, but mask
+ * anyway so callers never see stale upper bits
+ */
+ if (sd->polled_mode) {
+ if (regsize == 2)
+ *data &= 0xffff;
+ }
+ return SUCCESS;
+}
+
+/*
+ * Interrupt dispatch helper.  Reads the interrupt status, masks it with the
+ * interrupts we own, and returns FALSE if none are ours (the line may be
+ * shared).  A card interrupt is masked at the controller (under the IRQ
+ * lock) before calling the registered client handler, then re-enabled --
+ * the mask/call/unmask ordering here is load-bearing.  Any other (local
+ * controller) interrupt is silenced at the signal-enable registers and
+ * latched into got_hcint/last_intrstatus for the waiting thread.
+ * Returns TRUE if the interrupt was ours.
+ */
+bool
+check_client_intr(sdioh_info_t *sd)
+{
+ uint16 raw_int, cur_int, old_int;
+
+ raw_int = sdstd_rreg16(sd, SD_IntrStatus);
+ cur_int = raw_int & sd->intmask;
+
+ if (!cur_int) {
+ /* Not an error -- might share interrupts... */
+ return FALSE;
+ }
+
+ if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
+ unsigned long flags;
+
+ /* disable further card interrupts while the handler runs */
+ sdstd_os_lock_irqsave(sd, &flags);
+ old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));
+ sdstd_os_unlock_irqrestore(sd, &flags);
+
+ if (sd->client_intr_enabled && sd->use_client_ints) {
+ sd->intrcount++;
+ ASSERT(sd->intr_handler);
+ ASSERT(sd->intr_handler_arg);
+ (sd->intr_handler)(sd->intr_handler_arg);
+ } else {
+ sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+ __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+ }
+ /* re-enable card interrupts */
+ sdstd_os_lock_irqsave(sd, &flags);
+ old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 1));
+ sdstd_os_unlock_irqrestore(sd, &flags);
+ } else {
+ /* Local interrupt: disable, set flag, and save intrstatus */
+ sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
+ sd->local_intrcount++;
+ sd->got_hcint = TRUE;
+ sd->last_intrstatus = cur_int;
+ }
+
+ return TRUE;
+}
+
+/*
+ * Poll the normal and error interrupt status registers until one of the
+ * requested bits ('norm' in SD_IntrStatus, 'err' in SD_ErrorIntrStatus)
+ * appears or the retry budget runs out, then latch the masked status into
+ * sd->last_intrstatus.  If an error bit was seen, INTSTAT_ERROR_INT is
+ * folded into the mask so the caller observes the error condition.
+ */
+void
+sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ uint16 istat, estat;
+ int budget = RETRIES_LARGE;
+
+ istat = sdstd_rreg16(sd, SD_IntrStatus);
+ estat = sdstd_rreg16(sd, SD_ErrorIntrStatus);
+ while (--budget && !(istat & norm) && !(estat & err)) {
+ istat = sdstd_rreg16(sd, SD_IntrStatus);
+ estat = sdstd_rreg16(sd, SD_ErrorIntrStatus);
+ }
+
+ /* include everything we normally care about in the latched result */
+ norm |= sd->intmask;
+ if (estat & err)
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sd->last_intrstatus = istat & norm;
+}
+
+/* write a client register.
+ *
+ * For func 0 or single-byte writes, uses CMD52 carrying the data byte.
+ * Otherwise uses a 2- or 4-byte byte-mode CMD53; in polled mode this spins
+ * on Buffer-Write-Ready, pushes the data through the buffer data port, and
+ * then spins on Transfer-Complete, checking the error path in between.
+ * Returns SUCCESS or an error status.
+ */
+static int
+sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+ int status;
+ uint32 cmd_arg, rsp5, flags;
+
+ cmd_arg = 0;
+
+ if ((func == 0) || (regsize == 1)) {
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ flags = GFIELD(rsp5, RSP5_FLAGS);
+ if (flags && (flags != 0x10))
+ sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n",
+ __FUNCTION__, flags));
+ }
+ else {
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+ /* XXX SDIO spec v 1.10, Sec 5.3 Not FIFO */
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+ sd->data_xfer_count = regsize;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS)));
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+ if (sd->polled_mode) {
+ uint16 int_reg;
+ int retries = RETRIES_LARGE;
+
+ /* Wait for Write Buffer to become ready */
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
+
+ if (!retries) {
+ sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
+ "errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+ return (ERROR);
+ }
+ /* Clear Write Buf Ready bit */
+ int_reg = 0;
+ int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
+ sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+ /* At this point we have Buffer Ready, so write the data */
+ if (regsize == 2)
+ sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
+ else
+ sdstd_wreg(sd, SD_BufferDataPort0, data);
+
+ /* Wait for Transfer Complete */
+ retries = RETRIES_LARGE;
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
+
+ /* Check for any errors from the data phase */
+ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+ return ERROR;
+
+ /* NOTE(review): a transfer-complete timeout is only logged
+ * here, not returned as an error -- confirm this is intended
+ */
+ if (retries == 0) {
+ sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
+ "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
+ __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
+ int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sd->r_cnt, sd->t_cnt));
+ }
+ /* Clear the status bits */
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
+ }
+ }
+ return SUCCESS;
+}
+
+/*
+ * Copy up to four 32-bit words of the most recent command response from the
+ * controller's response registers (SD_Response0 upward, 4 bytes apart) into
+ * rsp_buffer.  'count' is clamped to 4 words.
+ */
+void
+sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
+{
+ int words_left = (count > 4) ? 4 : count;
+ int addr = SD_Response0;
+
+ while (words_left-- > 0) {
+ *rsp_buffer++ = sdstd_rreg(sd, addr);
+ addr += 4;
+ }
+}
+
+/*
+ NOTE(review): this comment originally documented an 'options' argument
+ (0 - default; 1 - issued as part of tuning, so the start-tuning check is
+ skipped), but the function below takes no such parameter -- CMD19 tuning
+ is special-cased on the command index inside the function instead.
+*/
+/*
+ * Issue a single SDIO command on the standard host controller.
+ *
+ * sdioh_info - host state; use_dma - caller's DMA preference (overridden
+ * for CMD53 based on block mode); cmd - SDIOH_CMD_* index; arg - the raw
+ * 32-bit command argument.
+ *
+ * Waits for CMD-inhibit to clear, builds the command register for the
+ * specific command (response type, CRC/index check, data flag), and for
+ * CMD53 also programs the transfer mode, block size/count and, when DMA is
+ * in use, the SDMA address or ADMA descriptor table.  In polled mode it
+ * then spins for command completion (CMD19 excepted), resetting the CMD
+ * line on timeout.  Returns SUCCESS or ERROR.
+ */
+static int
+sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
+{
+ uint16 cmd_reg;
+ int retries;
+ uint32 cmd_arg;
+ uint16 xfer_reg = 0;
+
+#ifdef BCMDBG
+ if (sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: Entering: ErrorIntrStatus 0x%x, Expecting 0\n",
+ __FUNCTION__, sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)));
+ }
+#endif
+
+ if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
+ ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
+ sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
+ return ERROR;
+ }
+
+ /* wait until the controller will accept a new command */
+ retries = RETRIES_SMALL;
+ while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) {
+ if (retries == RETRIES_SMALL)
+ sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
+ __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
+ }
+ if (!retries) {
+ sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+
+ /* per-command setup of the command register fields */
+ cmd_reg = 0;
+ switch (cmd) {
+ case SDIOH_CMD_0: /* Set Card to Idle State - No Response */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd))
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_5: /* Send Operation condition - Response R4 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_7: /* Select card - Response R1 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_14: /* eSD Sleep - Response R1 */
+ case SDIOH_CMD_11: /* Select card - Response R1 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_15: /* Set card to inactive state - Response None */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_19: /* clock tuning - Response R1 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ /* Host controller reads 64 byte magic pattern from card
+ * Hence Direction = 1 ( READ )
+ */
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+ break;
+
+ case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */
+
+ sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
+ __FUNCTION__,
+ GFIELD(arg, CMD52_FUNCTION),
+ GFIELD(arg, CMD52_REG_ADDR),
+ GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
+ GFIELD(arg, CMD52_DATA)));
+
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */
+
+ sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
+ __FUNCTION__,
+ GFIELD(arg, CMD53_FUNCTION),
+ GFIELD(arg, CMD53_REG_ADDR),
+ GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
+ GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
+ GFIELD(arg, CMD53_BYTE_BLK_CNT),
+ GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));
+
+ cmd_arg = arg;
+ xfer_reg = 0;
+
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+
+ /* DMA is only used for block-mode CMD53 transfers */
+ use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);
+
+ if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
+ uint16 blocksize;
+ uint16 blockcount;
+ int func;
+
+ ASSERT(sdioh_info->sd_blockmode);
+
+ func = GFIELD(cmd_arg, CMD53_FUNCTION);
+ blocksize = MIN((int)sdioh_info->data_xfer_count,
+ sdioh_info->client_block_size[func]);
+ blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
+
+ /* data_xfer_cnt is already setup so that for multiblock mode,
+ * it is the entire buffer length. For non-block or single block,
+ * it is < 64 bytes
+ */
+ if (use_dma) {
+ switch (sdioh_info->sd_dma_mode) {
+ case DMA_MODE_SDMA:
+ sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
+ __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr),
+ (uint32)sdioh_info->dma_phys));
+ sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
+ break;
+ case DMA_MODE_ADMA1:
+ case DMA_MODE_ADMA2:
+ sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
+#ifdef BCMSDIOH_TXGLOM
+ /* multi-descriptor is currently used only for hc3 */
+ if ((sdioh_info->glom_info.count != 0) &&
+ (sdioh_info->txglom_mode == SDPCM_TXGLOM_MDESC)) {
+ uint32 i = 0;
+ /* one descriptor per glommed buffer; only the
+ * last carries END/INT attributes
+ */
+ for (i = 0;
+ i < sdioh_info->glom_info.count-1;
+ i++) {
+ glom_buf_t *glom_info;
+ glom_info = &(sdioh_info->glom_info);
+ sd_create_adma_descriptor(sdioh_info,
+ i,
+ glom_info->dma_phys_arr[i],
+ glom_info->nbytes[i],
+ ADMA2_ATTRIBUTE_VALID |
+ ADMA2_ATTRIBUTE_ACT_TRAN);
+ }
+
+ sd_create_adma_descriptor(sdioh_info,
+ i,
+ sdioh_info->glom_info.dma_phys_arr[i],
+ sdioh_info->glom_info.nbytes[i],
+ ADMA2_ATTRIBUTE_VALID |
+ ADMA2_ATTRIBUTE_END |
+ ADMA2_ATTRIBUTE_INT |
+ ADMA2_ATTRIBUTE_ACT_TRAN);
+ } else
+#endif /* BCMSDIOH_TXGLOM */
+ {
+ sd_create_adma_descriptor(sdioh_info, 0,
+ sdioh_info->dma_phys, blockcount*blocksize,
+ ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
+ ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
+ }
+ /* Dump descriptor if DMA debugging is enabled. */
+ if (sd_msglevel & SDH_DMA_VAL) {
+ sd_dump_adma_dscr(sdioh_info);
+ }
+
+ sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
+ sdioh_info->adma2_dscr_phys);
+ break;
+ default:
+ sd_err(("%s: unsupported DMA mode %d.\n",
+ __FUNCTION__, sdioh_info->sd_dma_mode));
+ break;
+ }
+ }
+
+ sd_trace(("%s: Setting block count %d, block size %d bytes\n",
+ __FUNCTION__, blockcount, blocksize));
+ sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
+ sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);
+
+ xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
+
+ /* NOTE(review): passes function number 1 unconditionally here
+ * rather than 'func' -- confirm against set_client_block_size
+ */
+ if (sdioh_info->client_block_size[func] != blocksize)
+ set_client_block_size(sdioh_info, 1, blocksize);
+
+ if (blockcount > 1) {
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ } else {
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ }
+
+ if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+ else
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
+
+ retries = RETRIES_SMALL;
+ while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
+ PRES_DAT_INHIBIT) && --retries)
+ sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
+ __FUNCTION__, cmd));
+ if (!retries) {
+ sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+
+ /* Consider deferring this write to the comment below "Deferred Write" */
+ sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+
+ } else { /* Non block mode */
+ uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
+ /* The byte/block count field only has 9 bits,
+ * so, to do a 512-byte bytemode transfer, this
+ * field will contain 0, but we need to tell the
+ * controller we're transferring 512 bytes.
+ */
+ if (bytes == 0) bytes = 512;
+
+ if (use_dma)
+ sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
+
+ /* PCI: Transfer Mode register 0x0c */
+ xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+ else
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
+ /* See table 2-8 Host Controller spec ver 1.00 */
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Dont care */
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
+
+ sdstd_wreg16(sdioh_info, SD_BlockSize, bytes);
+
+ /* XXX This should be a don't care but Arasan needs it
+ * to be one. Its fixed in later versions (but they
+ * don't have version numbers, sigh).
+ */
+ sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
+
+ retries = RETRIES_SMALL;
+ while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
+ PRES_DAT_INHIBIT) && --retries)
+ sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
+ __FUNCTION__, cmd));
+ if (!retries) {
+ sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+
+ /* Consider deferring this write to the comment below "Deferred Write" */
+ sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+ }
+ break;
+
+ default:
+ sd_err(("%s: Unknown command\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ /* SPI mode: the controller cannot check CRC/index on SPI responses */
+ if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ }
+
+ /* Setup and issue the SDIO command */
+ sdstd_wreg(sdioh_info, SD_Arg0, arg);
+
+ /* Deferred Write
+ * Consider deferring the two writes above until this point in the code.
+ * The following would do one 32 bit write.
+ *
+ * {
+ * uint32 tmp32 = cmd_reg << 16;
+ * tmp32 |= xfer_reg;
+ * sdstd_wreg(sdioh_info, SD_TransferMode, tmp32);
+ * }
+ */
+
+ /* Alternate to Deferred Write START */
+
+ /* In response to CMD19 card sends 64 byte magic pattern.
+ * So SD_BlockSize = 64 & SD_BlockCount = 1
+ */
+ if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19) {
+ sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+ sdstd_wreg16(sdioh_info, SD_BlockSize, 64);
+ sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
+ }
+ /* writing the command register starts the transaction */
+ sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);
+
+ /* Alternate to Deferred Write END */
+
+ /* If we are in polled mode, wait for the command to complete.
+ * In interrupt mode, return immediately. The calling function will
+ * know that the command has completed when the CMDATDONE interrupt
+ * is asserted
+ */
+ if (sdioh_info->polled_mode) {
+ uint16 int_reg = 0;
+ retries = RETRIES_LARGE;
+
+ /* For CMD19 no need to wait for cmd completion */
+ if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19)
+ return SUCCESS;
+
+ do {
+ int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
+ sdstd_os_yield(sdioh_info);
+ } while (--retries &&
+ (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
+ (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
+
+ if (!retries) {
+ sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
+ "error stat 0x%x state 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
+ sdstd_rreg(sdioh_info, SD_PresentState)));
+
+ /* Attempt to reset CMD line when we get a CMD timeout */
+ sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+ retries = RETRIES_LARGE;
+ do {
+ sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
+ } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
+ SW_RESET_CMD)) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+ return (ERROR);
+ }
+
+ /* Clear Command Complete interrupt */
+ int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
+ sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);
+
+ /* Check for Errors */
+ if (sdstd_check_errs(sdioh_info, cmd, arg)) {
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+ }
+ return SUCCESS;
+}
+
+/*
+ * XXX On entry: if the transfer is single-block or non-block (byte) mode,
+ * buffersize <= blocksize.  If multi-block, buffersize is unlimited.
+ * Open question: how to handle leftover bytes in either single- or
+ * multi-block mode.  The caller should break the buffer up so this routine
+ * can always use blocksize == buffersize for the final piece of the buffer.
+ */
+
+/*
+ * Perform a CMD53 (IO_RW_EXTENDED) transfer of 'nbytes' to/from card
+ * function 'func' at register address 'addr'.  'rw' selects read vs.
+ * write; 'fifo' selects fixed-address (FIFO) vs. incrementing-address
+ * mode.  Block mode is used when enabled and nbytes >= the function's
+ * block size; DMA is used when configured, otherwise PIO through the
+ * buffer data port.  With tx-glom active, func-2 writes gather the
+ * frames queued in sd->glom_info into a single transfer.
+ * Returns SUCCESS, or ERROR / the failing command status.
+ */
+static int
+sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
+{
+	int retval = SUCCESS;
+	int status;
+	uint32 cmd_arg;
+	uint32 rsp5;
+	uint16 int_reg, int_bit;
+	uint flags;
+	int num_blocks, blocksize;
+	bool local_blockmode, local_dma;
+	bool read = rw == SDIOH_READ ? 1 : 0;
+	bool local_yield = FALSE;
+#ifdef BCMSDIOH_TXGLOM
+	uint32 i;
+	uint8 *localbuf = NULL;
+#endif
+
+	ASSERT(nbytes);
+
+	cmd_arg = 0;
+
+	sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+	         __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+	if (read) sd->r_cnt++; else sd->t_cnt++;
+
+	local_blockmode = sd->sd_blockmode;
+	local_dma = USE_DMA(sd);
+
+#ifdef BCMSDIOH_TXGLOM
+	/* If multiple buffers are there, then calculate the nbytes from that */
+	if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+		uint32 ii;
+		nbytes = 0;
+		for (ii = 0; ii < sd->glom_info.count; ii++) {
+			nbytes += sd->glom_info.nbytes[ii];
+		}
+		ASSERT(nbytes <= sd->alloced_dma_size);
+	}
+#endif
+
+	/* Don't bother with block mode on small xfers */
+	if (nbytes < sd->client_block_size[func]) {
+		sd_data(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n",
+		         nbytes, sd->client_block_size[func]));
+		local_blockmode = FALSE;
+		local_dma = FALSE;
+#ifdef BCMSDIOH_TXGLOM
+		/* In glommed case, create a single pkt from multiple pkts */
+		if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+			uint32 offset = 0;
+			localbuf = (uint8 *)MALLOC(sd->osh, nbytes);
+			if (localbuf == NULL) {
+				/* Fix: the allocation was previously used unchecked,
+				 * so a failed MALLOC crashed in bcopy() below.
+				 */
+				sd_err(("%s: glom MALLOC of %d bytes failed\n",
+				        __FUNCTION__, nbytes));
+				retval = ERROR;
+				goto done;
+			}
+			data = (uint32 *)localbuf;
+			for (i = 0; i < sd->glom_info.count; i++) {
+				bcopy(sd->glom_info.dma_buf_arr[i],
+				      ((uint8 *)data + offset),
+				      sd->glom_info.nbytes[i]);
+				offset += sd->glom_info.nbytes[i];
+			}
+		}
+#endif
+	}
+
+	if (local_blockmode) {
+		blocksize = MIN(sd->client_block_size[func], nbytes);
+		num_blocks = nbytes/blocksize;
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
+	} else {
+		num_blocks = 1;
+		blocksize = nbytes;
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+	}
+
+	if (local_dma && !read) {
+#ifdef BCMSDIOH_TXGLOM
+		if ((func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+			/* In case of hc ver 2 DMA_MAP may not work properly due to 4K alignment
+			 * requirements. So copying pkt to 4K aligned pre-allocated pkt.
+			 * Total length should not cross the pre-alloced memory size
+			 */
+			if (sd->txglom_mode == SDPCM_TXGLOM_CPY) {
+				uint32 total_bytes = 0;
+				for (i = 0; i < sd->glom_info.count; i++) {
+					bcopy(sd->glom_info.dma_buf_arr[i],
+					      (uint8 *)sd->dma_buf + total_bytes,
+					      sd->glom_info.nbytes[i]);
+					total_bytes += sd->glom_info.nbytes[i];
+				}
+				sd_sync_dma(sd, read, total_bytes);
+			}
+		} else
+#endif /* BCMSDIOH_TXGLOM */
+		{
+			/* Bounce the caller's buffer into the coherent DMA buffer */
+			bcopy(data, sd->dma_buf, nbytes);
+			sd_sync_dma(sd, read, nbytes);
+		}
+	}
+
+	/* OP_CODE 0 = fixed (FIFO) address, 1 = incrementing address */
+	if (fifo)
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);	/* XXX SDIO spec v 1.10, Sec 5.3 */
+	else
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);	/* XXX SDIO spec v 1.10, Sec 5.3 */
+
+	cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
+	if (read)
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+	else
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+	sd->data_xfer_count = nbytes;
+
+	/* sdstd_cmd_issue() returns with the command complete bit
+	 * in the ISR already cleared
+	 */
+	if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
+		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
+		retval = status;
+		goto done;
+	}
+
+	sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+	/* R5 flags other than "IO current state = transfer" (0x10) are errors */
+	if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
+		/* Fix: the log previously passed local_dma twice and never
+		 * printed local_blockmode.
+		 */
+		sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
+		        "numblocks %d, blocksize %d\n",
+		        __FUNCTION__, nbytes, local_dma, local_blockmode, read,
+		        num_blocks, blocksize));
+
+		if (flags & 1)
+			sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
+			        "bytes %d dma %d\n",
+			        __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
+			        GFIELD(cmd_arg, CMD53_BLK_MODE)));
+		if (flags & 0x8)
+			sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
+
+		sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
+		        __FUNCTION__, flags));
+		if (trap_errs)
+			ASSERT(0);
+		retval = ERROR;
+		goto done;
+	}
+
+	if (GFIELD(rsp5, RSP5_STUFF))
+		sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+		        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+#ifdef BCMSDYIELD
+	/* Allow the CPU to be yielded while waiting on large transfers */
+	local_yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
+#endif
+
+	if (!local_dma) {
+		/* PIO path: feed/drain the buffer data port one block at a time */
+		int bytes, ii;
+		uint32 tmp;
+
+		for (ii = 0; ii < num_blocks; ii++) {
+			int words;
+
+			/* Decide which status bit we're waiting for */
+			if (read)
+				int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
+			else
+				int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
+
+			/* If not on, wait for it (or for xfer error) */
+			int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+			if (!(int_reg & int_bit)) {
+				status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS,
+				                        local_yield, &int_reg);
+				switch (status) {
+				case -1:
+					sd_err(("%s: pio interrupted\n", __FUNCTION__));
+					retval = ERROR;
+					goto done;
+				case -2:
+					sd_err(("%s: pio timeout waiting for interrupt\n",
+					        __FUNCTION__));
+					retval = ERROR;
+					goto done;
+				}
+			}
+#ifdef BCMSLTGT
+			/* int_reg = sdstd_rreg16(sd, SD_IntrStatus); */
+#endif
+			/* Confirm we got the bit w/o error */
+			if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
+				sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
+				        "errint: 0x%x PresentState 0x%x\n",
+				        __FUNCTION__, read ? "Read" : "Write", int_reg,
+				        sdstd_rreg16(sd, SD_ErrorIntrStatus),
+				        sdstd_rreg(sd, SD_PresentState)));
+				sdstd_dumpregs(sd);
+				sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+				retval = ERROR;
+				goto done;
+			}
+
+			/* Clear Buf Ready bit */
+			sdstd_wreg16(sd, SD_IntrStatus, int_bit);
+
+			/* At this point we have Buffer Ready, write the data 4 bytes at a time */
+			for (words = blocksize/4; words; words--) {
+				if (read)
+					*data = sdstd_rreg(sd, SD_BufferDataPort0);
+				else
+					sdstd_wreg(sd, SD_BufferDataPort0, *data);
+				data++;
+			}
+
+			/* XXX
+			 * Handle < 4 bytes. wlc_pio.c currently (as of 12/20/05) truncates buflen
+			 * to be evenly divisible by 4. However dongle passes arbitrary lengths,
+			 * so handle it here
+			 */
+			bytes = blocksize % 4;
+
+			/* If no leftover bytes, go to next block */
+			if (!bytes)
+				continue;
+
+			switch (bytes) {
+			case 1:
+				/* R/W 8 bits */
+				if (read)
+					*(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0));
+				else
+					sdstd_wreg8(sd, SD_BufferDataPort0,
+					            (uint8)(*(data++) & 0xff));
+				break;
+			case 2:
+				/* R/W 16 bits */
+				if (read)
+					*(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
+				else
+					sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++)));
+				break;
+			case 3:
+				/* R/W 24 bits:
+				 * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23]
+				 */
+				if (read) {
+					tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
+					tmp |= ((uint32)(sdstd_rreg8(sd,
+					                 SD_BufferDataPort1)) << 16);
+					*(data++) = tmp;
+				} else {
+					tmp = *(data++);
+					sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff);
+					sdstd_wreg8(sd, SD_BufferDataPort1,
+					            (uint8)((tmp >> 16) & 0xff));
+				}
+				break;
+			default:
+				sd_err(("%s: Unexpected bytes leftover %d\n",
+				        __FUNCTION__, bytes));
+				ASSERT(0);
+				break;
+			}
+		}
+	} /* End PIO processing */
+
+	/* Wait for Transfer Complete or Transfer Error */
+	int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1);
+
+	/* If not on, wait for it (or for xfer error) */
+	int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+	if (!(int_reg & int_bit)) {
+		status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, local_yield, &int_reg);
+		switch (status) {
+		case -1:
+			sd_err(("%s: interrupted\n", __FUNCTION__));
+			retval = ERROR;
+			goto done;
+		case -2:
+			sd_err(("%s: timeout waiting for interrupt\n", __FUNCTION__));
+			retval = ERROR;
+			goto done;
+		}
+	}
+
+	/* Check for any errors from the data phase */
+	if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) {
+		retval = ERROR;
+		goto done;
+	}
+
+	/* May have gotten a software timeout if not blocking? */
+	int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+	if (!(int_reg & int_bit)) {
+		sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, "
+		        "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n",
+		        __FUNCTION__, read ? "R" : "W", local_dma,
+		        sdstd_rreg(sd, SD_PresentState), int_reg,
+		        sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes,
+		        sd->r_cnt, sd->t_cnt));
+		sdstd_dumpregs(sd);
+		retval = ERROR;
+		goto done;
+	}
+
+	/* Clear the status bits */
+	int_reg = int_bit;
+	if (local_dma) {
+		/* DMA Complete */
+		/* XXX Step 14, Section 3.6.2.2 Stnd Cntrlr Spec */
+		/* Reads in particular don't have DMA_COMPLETE set */
+		int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1);
+	}
+	sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+	/* Fetch data */
+	if (local_dma && read) {
+		sd_sync_dma(sd, read, nbytes);
+		bcopy(sd->dma_buf, data, nbytes);
+	}
+
+done:
+#ifdef BCMSDIOH_TXGLOM
+	if (localbuf)
+		MFREE(sd->osh, localbuf, nbytes);
+#endif
+	return retval;
+}
+
+/*
+ * Record and program the CMD53 block size for card function 'func'.
+ * Only the card-side FBR block-size registers are written here; the
+ * host-side block size is function dependent and is programmed on a
+ * per-transaction basis.
+ * Returns 0 on success, BCME_SDIO_ERROR on a register-write failure.
+ */
+static int
+set_client_block_size(sdioh_info_t *sd, int func, int block_size)
+{
+	int regbase = func * SDIOD_FBR_SIZE;
+	int result;
+
+	sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
+	sd->client_block_size[func] = block_size;
+
+	/* Write the 16-bit block size into the card's FBR, low byte first */
+	result = sdstd_card_regwrite(sd, 0, regbase + SDIOD_CCCR_BLKSIZE_0, 1,
+	                             block_size & 0xff);
+	if (result == 0) {
+		result = sdstd_card_regwrite(sd, 0, regbase + SDIOD_CCCR_BLKSIZE_1, 1,
+		                             (block_size >> 8) & 0xff);
+	}
+
+	return result ? BCME_SDIO_ERROR : 0;
+}
+
+/* Reset and re-initialize the device.
+ * Drops the bus clock to a safe divider, soft-resets the attached card,
+ * downgrades cached host state to 1-bit/normal-speed mode, then runs the
+ * full client init sequence again.  Returns sdstd_client_init()'s status,
+ * or ERROR if the clock could not be started.
+ */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+	uint8 hreg;
+
+	/* Reset the attached device (use slower clock for safety) */
+	if (!sdstd_start_clock(si, 128)) {
+		sd_err(("set clock failed!\n"));
+		return ERROR;
+	}
+	sdstd_reset(si, 0, 1);
+
+	/* Reset portions of the host state accordingly */
+	/* NOTE(review): hreg has HI_SPEED_EN and DATA_WIDTH cleared but is
+	 * never written back to SD_HostCntrl -- presumably sdstd_client_init()
+	 * reprograms the host control register; confirm before relying on
+	 * these bits being cleared in hardware here.
+	 */
+	hreg = sdstd_rreg8(si, SD_HostCntrl);
+	hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0);
+	hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0);
+	si->sd_mode = SDIOH_MODE_SD1;
+
+	/* Reinitialize the card */
+	si->card_init_done = FALSE;
+	return sdstd_client_init(si);
+}
+
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+/* Debug helper (compiled out: BCMINTERNAL + NOTUSED): read up to 512 CIS
+ * bytes for 'func' starting at the cached CIS pointer, formatting them as
+ * hex text into 'data' (16 bytes per line).  Stops early on a register
+ * read failure or when the output cursor reaches data + len.
+ * NOTE(review): the 'data < end' check does not account for the 3-4 bytes
+ * sprintf() appends per iteration, so the output can overrun 'end' by a
+ * few bytes -- confirm callers oversize the buffer if this is re-enabled.
+ */
+static void
+cis_fetch(sdioh_info_t *sd, int func, char *data, int len)
+{
+	int count;
+	int offset;
+	char *end = data + len;
+	uint32 foo;
+
+	for (count = 0; count < 512 && data < end; count++) {
+		/* CIS bytes are fetched one at a time through CMD52 reads */
+		offset = sd->func_cis_ptr[func] + count;
+		if (sdstd_card_regread (sd, func, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed\n", __FUNCTION__));
+			return;
+		}
+		data += sprintf(data, "%.2x ", foo & 0xff);
+		if (((count+1) % 16) == 0)
+			data += sprintf(data, "\n");
+	}
+}
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
+/* Allocate the two consistent (coherent) DMA regions used by this host:
+ * a data bounce buffer and an ADMA2 descriptor page.  Each is rounded up
+ * to SD_PAGE alignment (the pre-rounding pointers are kept for freeing).
+ * If either allocation fails, DMA support is disabled (sd_dma_mode set
+ * to DMA_MODE_NONE) and the driver falls back to PIO.
+ */
+static void
+sd_map_dma(sdioh_info_t * sd)
+{
+
+	int alloced;
+	void *va;
+	uint dma_buf_size = SD_PAGE;
+
+#ifdef BCMSDIOH_TXGLOM
+	/* There is no alignment requirement for HC3 */
+	if ((sd->version == HOST_CONTR_VER_3) && sd_txglom) {
+		/* Max glom packet length is 64KB */
+		dma_buf_size = SD_PAGE * 16;
+	}
+#endif
+
+	alloced = 0;
+	/* Data bounce buffer (CMD53 payloads are copied through this) */
+	if ((va = DMA_ALLOC_CONSISTENT(sd->osh, dma_buf_size, SD_PAGE_BITS, &alloced,
+	                               &sd->dma_start_phys, 0x12)) == NULL) {
+		sd->sd_dma_mode = DMA_MODE_NONE;
+		sd->dma_start_buf = 0;
+		sd->dma_buf = (void *)0;
+		sd->dma_phys = 0;
+		sd->alloced_dma_size = 0;
+		sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__));
+	} else {
+		sd->dma_start_buf = va;
+		sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
+		sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE);
+		sd->alloced_dma_size = alloced;
+		sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%x-0x%x\n",
+		        __FUNCTION__, sd->alloced_dma_size, sd->dma_buf,
+		        (uint)PHYSADDRHI(sd->dma_phys), (uint)PHYSADDRLO(sd->dma_phys)));
+		/* Pre-fill with a recognizable pattern to aid debugging */
+		sd_fill_dma_data_buf(sd, 0xA5);
+	}
+
+	/* ADMA2 descriptor table page */
+	if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, SD_PAGE_BITS, &alloced,
+	                               &sd->adma2_dscr_start_phys, 0x12)) == NULL) {
+		sd->sd_dma_mode = DMA_MODE_NONE;
+		sd->adma2_dscr_start_buf = 0;
+		sd->adma2_dscr_buf = (void *)0;
+		sd->adma2_dscr_phys = 0;
+		sd->alloced_adma2_dscr_size = 0;
+		sd_err(("%s: DMA_ALLOC failed for descriptor buffer. "
+		        "Disabling DMA support.\n", __FUNCTION__));
+	} else {
+		sd->adma2_dscr_start_buf = va;
+		sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
+		sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE);
+		sd->alloced_adma2_dscr_size = alloced;
+		sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%x-0x%x\n",
+		        __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf,
+		        (uint)PHYSADDRHI(sd->adma2_dscr_phys),
+		        (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
+		sd_clear_adma_dscr_buf(sd);
+	}
+}
+
+/* Release the consistent DMA data buffer and the ADMA2 descriptor page
+ * allocated by sd_map_dma(), skipping whichever allocation failed.
+ * Frees from the original (pre-ROUNDUP) start addresses.
+ */
+static void
+sd_unmap_dma(sdioh_info_t * sd)
+{
+	if (sd->dma_start_buf != NULL) {
+		DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size,
+		                    sd->dma_start_phys, 0x12);
+	}
+
+	if (sd->adma2_dscr_start_buf != NULL) {
+		DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf,
+		                    sd->alloced_adma2_dscr_size,
+		                    sd->adma2_dscr_start_phys, 0x12);
+	}
+}
+
+/* Zero the entire ADMA2 descriptor page, then dump the (now empty)
+ * table for debug visibility.
+ */
+static void
+sd_clear_adma_dscr_buf(sdioh_info_t *sd)
+{
+	memset(sd->adma2_dscr_buf, 0, SD_PAGE);
+	sd_dump_adma_dscr(sd);
+}
+
+/* Fill the DMA data buffer page with the byte pattern 'data' so stale
+ * contents are recognizable when debugging transfers.
+ */
+static void
+sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data)
+{
+	memset(sd->dma_buf, data, SD_PAGE);
+}
+
+/*
+ * Fill in entry 'index' of the ADMA descriptor table with a transfer of
+ * 'length' bytes at physical address 'addr_phys', OR-ing in 'flags'
+ * (valid/end/int/act attribute bits).  In ADMA1 mode each logical entry
+ * consumes two table slots: a SET(length) descriptor followed by the
+ * data descriptor holding the (4K-aligned) address.
+ */
+static void
+sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
+                          uint32 addr_phys, uint16 length, uint16 flags)
+{
+	adma2_dscr_32b_t *tbl32 = sd->adma2_dscr_buf;
+	adma1_dscr_t *tbl1 = sd->adma2_dscr_buf;
+
+	switch (sd->sd_dma_mode) {
+	case DMA_MODE_ADMA2:
+		sd_dma(("%s: creating ADMA2 descriptor for index %d\n",
+		        __FUNCTION__, index));
+
+		/* 32-bit descriptor: address word + (length << 16 | attributes) */
+		tbl32[index].phys_addr = addr_phys;
+		tbl32[index].len_attr = ((uint32)length << 16) | flags;
+		break;
+	case DMA_MODE_ADMA1:
+		/* ADMA1 requires two descriptors, one for len
+		 * and the other for data transfer
+		 */
+		index <<= 1;
+
+		sd_dma(("%s: creating ADMA1 descriptor for index %d\n",
+		        __FUNCTION__, index));
+
+		tbl1[index].phys_addr_attr = ((uint32)length << 12) |
+		        (ADMA1_ATTRIBUTE_ACT_SET | ADMA2_ATTRIBUTE_VALID);
+		tbl1[index + 1].phys_addr_attr = (addr_phys & 0xFFFFF000) |
+		        (flags & 0x3f);
+		break;
+	default:
+		sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n",
+		        __FUNCTION__, sd->sd_dma_mode));
+		break;
+	}
+}
+
+/*
+ * Debug dump of the ADMA descriptor table at sd->adma2_dscr_buf,
+ * decoding each entry's attribute bits until the first entry without
+ * the VALID bit.  On linux, LINK descriptors are followed through
+ * phys_to_virt(); elsewhere the walk just advances to the next slot.
+ */
+static void
+sd_dump_adma_dscr(sdioh_info_t *sd)
+{
+	adma2_dscr_32b_t *adma2_dscr_table;
+	adma1_dscr_t *adma1_dscr_table;
+	uint32 i = 0;
+	uint16 flags;
+	char flags_str[32];
+
+	ASSERT(sd->adma2_dscr_buf != NULL);
+
+	adma2_dscr_table = sd->adma2_dscr_buf;
+	adma1_dscr_table = sd->adma2_dscr_buf;
+
+	switch (sd->sd_dma_mode) {
+	case DMA_MODE_ADMA2:
+		sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%x-0x%x\n",
+		        SD_PAGE, sd->adma2_dscr_buf,
+		        (uint)PHYSADDRHI(sd->adma2_dscr_phys),
+		        (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
+		sd_err((" #[Descr VA  ]  Buffer PA  | Len    | Flags  (5:4  2   1   0)"
+		        "     |\n"));
+		while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) {
+			flags = adma2_dscr_table->len_attr & 0xFFFF;
+			sprintf(flags_str, "%s%s%s%s",
+			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+			         ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
+			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+			         ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
+			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+			         ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP  " : "RSV  ",
+			        (flags & ADMA2_ATTRIBUTE_INT ? "INT " : "    "),
+			        (flags & ADMA2_ATTRIBUTE_END ? "END " : "    "),
+			        (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
+			sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n",
+			        i, adma2_dscr_table, adma2_dscr_table->phys_addr,
+			        adma2_dscr_table->len_attr >> 16, flags, flags_str));
+			i++;
+
+#ifdef linux
+			/* Follow LINK descriptors or skip to next. */
+			if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+			    ADMA2_ATTRIBUTE_ACT_LINK) {
+				adma2_dscr_table = phys_to_virt(
+				        adma2_dscr_table->phys_addr);
+			} else {
+				adma2_dscr_table++;
+			}
+#else
+			adma2_dscr_table++;
+#endif /* linux */
+
+		}
+		break;
+	case DMA_MODE_ADMA1:
+		sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%x-0x%x\n",
+		        SD_PAGE, sd->adma2_dscr_buf,
+		        (uint)PHYSADDRHI(sd->adma2_dscr_phys),
+		        (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
+		sd_err((" #[Descr VA  ]  Buffer PA  | Flags  (5:4  2   1   0)     |\n"));
+
+		for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) {
+			flags = adma1_dscr_table->phys_addr_attr & 0x3F;
+			sprintf(flags_str, "%s%s%s%s",
+			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+			         ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
+			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+			         ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
+			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+			         ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP  " : "SET  ",
+			        (flags & ADMA2_ATTRIBUTE_INT ? "INT " : "    "),
+			        (flags & ADMA2_ATTRIBUTE_END ? "END " : "    "),
+			        (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
+			sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n",
+			        i, adma1_dscr_table,
+			        adma1_dscr_table->phys_addr_attr & 0xFFFFF000,
+			        flags, flags_str));
+
+#ifdef linux
+			/* Follow LINK descriptors or skip to next. */
+			if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+			    ADMA2_ATTRIBUTE_ACT_LINK) {
+				adma1_dscr_table = phys_to_virt(
+				        adma1_dscr_table->phys_addr_attr & 0xFFFFF000);
+			} else {
+				adma1_dscr_table++;
+			}
+#else
+			/* Fix: this advanced adma2_dscr_table, leaving the ADMA1
+			 * walk stuck on the same entry (infinite loop).
+			 */
+			adma1_dscr_table++;
+#endif /* linux */
+		}
+		break;
+	default:
+		sd_err(("Unknown DMA Descriptor Table Format.\n"));
+		break;
+	}
+}
+
+/* Debug aid: dump the host controller's normal/error interrupt status,
+ * status-enable, and signal-enable register pairs (called on transfer
+ * failures to capture controller state).
+ */
+static void
+sdstd_dumpregs(sdioh_info_t *sd)
+{
+	sd_err(("IntrStatus:       0x%04x ErrorIntrStatus       0x%04x\n",
+	        sdstd_rreg16(sd, SD_IntrStatus),
+	        sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+	sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
+	        sdstd_rreg16(sd, SD_IntrStatusEnable),
+	        sdstd_rreg16(sd, SD_ErrorIntrStatusEnable)));
+	sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
+	        sdstd_rreg16(sd, SD_IntrSignalEnable),
+	        sdstd_rreg16(sd, SD_ErrorIntrSignalEnable)));
+}
diff --git a/bcmdhd.101.10.361.x/bcmsdstd.h b/bcmdhd.101.10.361.x/bcmsdstd.h
new file mode 100755
index 0000000..e0d19e2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdstd.h
@@ -0,0 +1,301 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdstd.h 833030 2019-08-02 17:22:42Z jl904071 $
+ */
+#ifndef _BCM_SD_STD_H
+#define _BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#ifdef BCMDBG
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#define sd_dma(x) do { if (sd_msglevel & SDH_DMA_VAL) printf x; } while (0)
+#else
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+#endif /* BCMDBG */
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#ifdef BCMPERFSTATS
+#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0)
+#else
+#define sd_log(x)
+#endif
+
+/* Soft assert: log file/line on failure instead of halting.
+ * Fix: message said "lines %d" and had no trailing newline, so it fused
+ * with the next line of console output.
+ */
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d\n", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+#define SDIOH_MODE_SD1 1
+#define SDIOH_MODE_SD4 2
+
+#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK 1
+#define SDIOH_TYPE_BCM27XX 2
+#ifdef BCMINTERNAL
+#define SDIOH_TYPE_JINVANI_GOLD 3
+#endif
+#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#if defined(linux) && defined(BCMDONGLEHOST)
+#define BCMSDYIELD
+#endif
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS 0x00001E00
+
+#define RETRIES_LARGE 100000
+#ifdef BCMQT
+extern void sdstd_os_yield(sdioh_info_t *sd);
+#define RETRIES_SMALL 10000
+#else
+#define sdstd_os_yield(sd) do {} while (0)
+#define RETRIES_SMALL 100
+#endif
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+#define USE_FIFO 0x8 /* Fifo vs non-fifo */
+
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+#define HC_INTR_RETUNING 0x1000
+
+#ifdef BCMSDIOH_TXGLOM
+/* Total glom pkt can not exceed 64K
+ * need one more slot for glom padding packet
+ */
+#define SDIOH_MAXGLOM_SIZE (40+1)
+
+/* Bookkeeping for one tx-glom super-frame: the queued member frames,
+ * their DMA-mapped addresses, and each frame's length in bytes.
+ */
+typedef struct glom_buf {
+	uint32 count;				/* Total number of pkts queued */
+	void *dma_buf_arr[SDIOH_MAXGLOM_SIZE];	/* Frame address */
+	dmaaddr_t dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */
+	uint16 nbytes[SDIOH_MAXGLOM_SIZE];	/* Size of each frame */
+} glom_buf_t;
+#endif
+
+/* Per-slot state for the standard SDIO host controller driver: register
+ * mapping, interrupt bookkeeping, transfer-mode configuration, DMA/ADMA
+ * buffers, and SDIO 3.0 (UHS-I) tuning state.
+ */
+struct sdioh_info {
+	uint		cfg_bar;		/* pci cfg address for bar */
+	uint32		caps;			/* cached value of capabilities reg */
+	uint32		curr_caps;		/* max current capabilities reg */
+
+	osl_t		*osh;			/* osh handler */
+	volatile char	*mem_space;		/* pci device memory va */
+	uint		lockcount;		/* nest count of sdstd_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint		target_dev;		/* Target device ID */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+	void		*bcmsdh;		/* handler to upper layer stack (bcmsdh) */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint		irq;			/* Client irq */
+	int		intrcount;		/* Client interrupts */
+	int		local_intrcount;	/* Controller interrupts */
+	bool		host_init_done;		/* Controller initted */
+	bool		card_init_done;		/* Client SDIO interface initted */
+	bool		polled_mode;		/* polling for command completion */
+
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];	/* Blocksize */
+	uint32		data_xfer_count;	/* Current transfer */
+	uint16		card_rca;		/* Current Address */
+	int8		sd_dma_mode;		/* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;		/* Common (func 0) CIS pointer */
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];	/* Per-function CIS pointers */
+	void		*dma_buf;		/* DMA Buffer virtual address */
+	dmaaddr_t	dma_phys;		/* DMA Buffer physical address */
+	void		*adma2_dscr_buf;	/* ADMA2 Descriptor Buffer virtual address */
+	dmaaddr_t	adma2_dscr_phys;	/* ADMA2 Descriptor Buffer physical address */
+
+	/* adjustments needed to make the dma align properly */
+	void		*dma_start_buf;		/* unaligned allocation start (for free) */
+	dmaaddr_t	dma_start_phys;
+	uint		alloced_dma_size;
+	void		*adma2_dscr_start_buf;	/* unaligned allocation start (for free) */
+	dmaaddr_t	adma2_dscr_start_phys;
+	uint		alloced_adma2_dscr_size;
+
+	int		r_cnt;			/* rx count */
+	int		t_cnt;			/* tx_count */
+	bool		got_hcint;		/* local interrupt flag */
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int	host_UHSISupported;		/* whether UHSI is supported for HC. */
+	int	card_UHSI_voltage_Supported;	/* whether UHSI is supported for
+						 * Card in terms of Voltage [1.8 or 3.3].
+						 */
+	int	global_UHSI_Supp;	/* type of UHSI support in both host and card.
+					 * HOST_SDR_UNSUPP: capabilities not supported/matched
+					 * HOST_SDR_12_25: SDR12 and SDR25 supported
+					 * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+					 */
+	volatile int	sd3_dat_state;		/* data transfer state used for retuning check */
+	volatile int	sd3_tun_state;		/* tuning state used for retuning check */
+	bool	sd3_tuning_reqd;	/* tuning requirement parameter */
+	bool	sd3_tuning_disable;	/* tuning disable due to bus sleeping */
+	uint32	caps3;			/* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+#ifdef BCMSDIOH_TXGLOM
+	glom_buf_t glom_info;		/* pkt information used for glomming */
+	uint	txglom_mode;		/* Txglom mode: 0 - copy, 1 - multi-descriptor */
+#endif
+};
+
+#define DMA_MODE_NONE 0
+#define DMA_MODE_SDMA 1
+#define DMA_MODE_ADMA1 2
+#define DMA_MODE_ADMA2 3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO -1
+
+#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 0
+#define TUNING_START 1
+#define TUNING_START_AFTER_DAT 2
+#define TUNING_ONGOING 3
+
+#define DATA_TRANSFER_IDLE 0
+#define DATA_TRANSFER_ONGOING 1
+
+#define CHECK_TUNING_PRE_DATA 1
+#define CHECK_TUNING_POST_DATA 2
+
+#ifdef DHD_DEBUG
+#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01
+#define SD_DHD_ENABLE_PERIODIC_TUNING 0x00
+#endif
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, dmaaddr_t addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, dmaaddr_t addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wrappers for safe concurrent register access */
+extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags);
+extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern int sdstd_3_get_data_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+extern void sdstd_enable_disable_periodic_timer(sd