Merge "cnss2: Assert if unexpected second QMI server arrival comes"
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
index 97194b4..3592739 100644
--- a/arch/arm64/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -570,6 +570,7 @@
 CONFIG_QCOM_CX_IPEAK=y
 CONFIG_QTI_CRYPTO_COMMON=y
 CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_QTI_HW_KEY_MANAGER=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_QMI=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index c241006..4467b21 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -594,6 +594,7 @@
 CONFIG_QCOM_CX_IPEAK=y
 CONFIG_QTI_CRYPTO_COMMON=y
 CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_QTI_HW_KEY_MANAGER=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_ICNSS_QMI=y
diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig
new file mode 100644
index 0000000..67089f8
--- /dev/null
+++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig
@@ -0,0 +1,647 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_PSI_FTRACE=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_HOTPLUG_SIZE_BITS=29
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM660=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+CONFIG_PRINT_VMEMLAYOUT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=8
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_DMA_CMA=y
+CONFIG_MTD=m
+CONFIG_ZRAM=y
+CONFIG_ZRAM_DEDUP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_HDCP_QSEECOM=y
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_SCSI_UFS_CRYPTO_QTI=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_RMNET=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_TCM is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_QTI_HAPTICS=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_FASTCVPD=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_QNOVO5=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_RC_CORE=m
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_MSM_HSUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_TYPEC=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_CQHCI_CRYPTO=y
+CONFIG_MMC_CQHCI_CRYPTO_QTI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_SYNC_FILE=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_GSI=y
+CONFIG_MSM_11AD=m
+CONFIG_USB_BAM=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPSS=y
+CONFIG_RPMSG_QCOM_GLINK_SPI=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_OVERRIDE_MEMORY_LIMIT=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_QCOM_FSA4480_I2C=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_WDOG_IPI_ENABLE=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_CX_IPEAK=y
+CONFIG_QTI_CRYPTO_COMMON=y
+CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_QCOM_SPMI_VADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_MPM=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SENSORS_SSC=y
+CONFIG_QCOM_KGSL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig
new file mode 100644
index 0000000..6e561f2
--- /dev/null
+++ b/arch/arm64/configs/vendor/sdm660_defconfig
@@ -0,0 +1,698 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_PSI_FTRACE=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_HOTPLUG_SIZE_BITS=29
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM660=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+CONFIG_PRINT_VMEMLAYOUT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_ALLOW_WRITE_DEBUGFS=y
+CONFIG_CMA_AREAS=8
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_HDCP_QSEECOM=y
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_SCSI_UFS_CRYPTO_QTI=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_TCM is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_QTI_HAPTICS=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_FASTCVPD=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_MSM_HSUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_TYPEC=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_CQHCI_CRYPTO=y
+CONFIG_MMC_CQHCI_CRYPTO_QTI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_SYNC_FILE=y
+CONFIG_DEBUG_DMA_BUF_REF=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_GSI=y
+CONFIG_MSM_11AD=m
+CONFIG_USB_BAM=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_TLBSYNC_DEBUG=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPSS=y
+CONFIG_RPMSG_QCOM_GLINK_SPI=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_OVERRIDE_MEMORY_LIMIT=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_QCOM_FSA4480_I2C=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_WDOG_IPI_ENABLE=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_CX_IPEAK=y
+CONFIG_QTI_CRYPTO_COMMON=y
+CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_QCOM_SPMI_VADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_MPM=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SENSORS_SSC=y
+CONFIG_QCOM_KGSL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=m
+CONFIG_MEMTEST=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index c6b8653..883c872 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -3213,7 +3213,7 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
 		pr_err("adsprpc: ERROR: %s: user application %s trying to unmap without initialization\n",
 			 __func__, current->comm);
 		err = EBADR;
-		goto bail;
+		return err;
 	}
 	mutex_lock(&fl->internal_map_mutex);
 
@@ -3262,6 +3262,11 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
 	return err;
 }
 
+/*
+ *	fastrpc_internal_munmap_fd can only be used for buffers
+ *	mapped with persist attributes. This can only be called
+ *	once for any persist buffer
+ */
 static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
 				struct fastrpc_ioctl_munmap_fd *ud)
 {
@@ -3270,14 +3275,15 @@ static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
 
 	VERIFY(err, (fl && ud));
 	if (err)
-		goto bail;
+		return err;
 	VERIFY(err, fl->dsp_proc_init == 1);
 	if (err) {
 		pr_err("adsprpc: ERROR: %s: user application %s trying to unmap without initialization\n",
 			__func__, current->comm);
 		err = EBADR;
-		goto bail;
+		return err;
 	}
+	mutex_lock(&fl->internal_map_mutex);
 	mutex_lock(&fl->map_mutex);
 	if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
 		pr_err("adsprpc: mapping not found to unmap fd 0x%x, va 0x%llx, len 0x%x\n",
@@ -3287,10 +3293,13 @@ static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
 		mutex_unlock(&fl->map_mutex);
 		goto bail;
 	}
-	if (map)
+	if (map && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
+		map->attr = map->attr & (~FASTRPC_ATTR_KEEP_MAP);
 		fastrpc_mmap_free(map, 0);
+	}
 	mutex_unlock(&fl->map_mutex);
 bail:
+	mutex_unlock(&fl->internal_map_mutex);
 	return err;
 }
 
@@ -3309,7 +3318,7 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
 		pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n",
 			__func__, current->comm);
 		err = EBADR;
-		goto bail;
+		return err;
 	}
 	mutex_lock(&fl->internal_map_mutex);
 	if ((ud->flags == ADSP_MMAP_ADD_PAGES) ||
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5108bca..4f38dba 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1668,10 +1668,13 @@ unsigned char *dci_get_buffer_from_bridge(int token)
 {
 	uint8_t retries = 0, max_retries = 50;
 	unsigned char *buf = NULL;
+	unsigned long flags;
 
 	do {
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
 				    dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 		if (!buf) {
 			usleep_range(5000, 5100);
 			retries++;
@@ -1689,13 +1692,16 @@ int diag_dci_write_bridge(int token, unsigned char *buf, int len)
 
 int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
 {
+	unsigned long flags;
 	int token = BRIDGE_TO_TOKEN(index);
 
 	if (!VALID_DCI_TOKEN(token)) {
 		pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
 		return -EINVAL;
 	}
+	spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 	diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 	return 0;
 }
 #endif
@@ -1709,6 +1715,7 @@ static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
 	int dci_header_size = sizeof(struct diag_dci_header_t);
 	int ret = DIAG_DCI_NO_ERROR;
 	uint32_t write_len = 0;
+	unsigned long flags;
 
 	if (!data)
 		return -EIO;
@@ -1742,7 +1749,9 @@ static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
 	if (ret) {
 		pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
 			token, ret);
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 	} else {
 		ret = DIAG_DCI_NO_ERROR;
 	}
@@ -1766,6 +1775,7 @@ int diag_dci_send_handshake_pkt(int index)
 	struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
 	unsigned char *buf = NULL;
 	struct diag_dci_header_t dci_header;
+	unsigned long flags;
 
 	if (!VALID_DCI_TOKEN(token)) {
 		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
@@ -1805,7 +1815,9 @@ int diag_dci_send_handshake_pkt(int index)
 	if (err) {
 		pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
 		       token, err);
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 		return err;
 	}
 
@@ -2469,6 +2481,7 @@ int diag_send_dci_event_mask_remote(int token)
 	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
 	unsigned char *event_mask_ptr = NULL;
 	uint32_t write_len = 0;
+	unsigned long flags;
 
 	mutex_lock(&dci_event_mask_mutex);
 	event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
@@ -2514,7 +2527,9 @@ int diag_send_dci_event_mask_remote(int token)
 	if (err) {
 		pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
 		       token, err);
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 		ret = err;
 	} else {
 		ret = DIAG_DCI_NO_ERROR;
@@ -2671,6 +2686,7 @@ int diag_send_dci_log_mask_remote(int token)
 	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
 	int updated;
 	uint32_t write_len = 0;
+	unsigned long flags;
 
 	mutex_lock(&dci_log_mask_mutex);
 	log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
@@ -2710,7 +2726,10 @@ int diag_send_dci_log_mask_remote(int token)
 		if (err) {
 			pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
 			       i, token, err);
+			spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 			diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+			spin_unlock_irqrestore(&driver->dci_mempool_lock,
+				flags);
 			updated = 0;
 		}
 		if (updated)
@@ -2850,6 +2869,7 @@ int diag_dci_init(void)
 	mutex_init(&dci_log_mask_mutex);
 	mutex_init(&dci_event_mask_mutex);
 	spin_lock_init(&ws_lock);
+	spin_lock_init(&driver->dci_mempool_lock);
 
 	ret = diag_dci_init_ops_tbl();
 	if (ret)
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 0516429..880cbeb 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -797,6 +797,7 @@ struct diagchar_dev {
 	struct mutex diag_id_mutex;
 	struct mutex diagid_v2_mutex;
 	struct mutex cmd_reg_mutex;
+	spinlock_t dci_mempool_lock;
 	uint32_t cmd_reg_count;
 	struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
 	/* Sizes that reflect memory pool sizes */
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 65f5613..7221983 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -623,6 +623,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 	dmabuf->buf_name = bufname;
+	dmabuf->name = bufname;
 	dmabuf->ktime = ktime_get();
 	atomic_set(&dmabuf->dent_count, 1);
 
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index ef4c8f2..2b22042 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -404,6 +404,7 @@ enum a6xx_debugbus_id {
 	A6XX_DBGBUS_RAS          = 0xc,
 	A6XX_DBGBUS_VSC          = 0xd,
 	A6XX_DBGBUS_COM          = 0xe,
+	A6XX_DBGBUS_COM_1        = 0xf,
 	A6XX_DBGBUS_LRZ          = 0x10,
 	A6XX_DBGBUS_A2D          = 0x11,
 	A6XX_DBGBUS_CCUFCHE      = 0x12,
@@ -515,6 +516,11 @@ static const struct adreno_debugbus_block a650_dbgc_debugbus_blocks[] = {
 	{ A6XX_DBGBUS_SPTP_5, 0x100, },
 };
 
+static const struct adreno_debugbus_block a702_dbgc_debugbus_blocks[] = {
+	{ A6XX_DBGBUS_COM_1, 0x100, },
+	{ A6XX_DBGBUS_SPTP_0, 0x100, },
+};
+
 #define A6XX_NUM_SHADER_BANKS 3
 #define A6XX_SHADER_STATETYPE_SHIFT 8
 
@@ -1528,6 +1534,15 @@ static void a6xx_snapshot_debugbus(struct adreno_device *adreno_dev,
 		}
 	}
 
+	if (adreno_is_a702(adreno_dev)) {
+		for (i = 0; i < ARRAY_SIZE(a702_dbgc_debugbus_blocks); i++) {
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+				snapshot, a6xx_snapshot_dbgc_debugbus_block,
+				(void *) &a702_dbgc_debugbus_blocks[i]);
+		}
+	}
+
 	/*
 	 * GBIF has same debugbus as of other GPU blocks hence fall back to
 	 * default path if GPU uses GBIF.
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 1854e9b..ab4a6af 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -138,6 +138,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
 	{ .compatible = "qcom,kona-spcs-global", .data = (void *)0 },
 	{ .compatible = "qcom,bengal-apcs-hmss-global", .data = (void *)8 },
 	{ .compatible = "qcom,scuba-apcs-hmss-global", .data = (void *)8 },
+	{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = (void *)8 },
 	{}
 };
 MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c
index 19be201..ea29ebc 100644
--- a/drivers/md/dm-default-key.c
+++ b/drivers/md/dm-default-key.c
@@ -133,9 +133,11 @@ static int default_key_ctr_optional(struct dm_target *ti,
 	return 0;
 }
 
-void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti,
-					   struct default_key_c **dkc, u8 *raw,
-					   u32 size)
+static void default_key_adjust_sector_size_and_iv(char **argv,
+						  struct dm_target *ti,
+						  struct default_key_c **dkc,
+						  u8 *raw, u32 size,
+						  bool is_legacy)
 {
 	struct dm_dev *dev;
 	int i;
@@ -146,7 +148,7 @@ void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti,
 
 	dev = (*dkc)->dev;
 
-	if (!strcmp(argv[0], "AES-256-XTS")) {
+	if (is_legacy) {
 		memcpy(key_new.bytes, raw, size);
 
 		for (i = 0; i < ARRAY_SIZE(key_new.words); i++)
@@ -179,6 +181,24 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	unsigned long long tmpll;
 	char dummy;
 	int err;
+	char *_argv[10];
+	bool is_legacy = false;
+
+	if (argc >= 4 && !strcmp(argv[0], "AES-256-XTS")) {
+		argc = 0;
+		_argv[argc++] = "aes-xts-plain64";
+		_argv[argc++] = argv[1];
+		_argv[argc++] = "0";
+		_argv[argc++] = argv[2];
+		_argv[argc++] = argv[3];
+		_argv[argc++] = "3";
+		_argv[argc++] = "allow_discards";
+		_argv[argc++] = "sector_size:4096";
+		_argv[argc++] = "iv_large_sectors";
+		_argv[argc] = NULL;
+		argv = _argv;
+		is_legacy = true;
+	}
 
 	if (argc < 5) {
 		ti->error = "Not enough arguments";
@@ -254,7 +274,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	default_key_adjust_sector_size_and_iv(argv, ti, &dkc, raw_key,
-					      raw_key_size);
+					      raw_key_size, is_legacy);
 
 	dkc->sector_bits = ilog2(dkc->sector_size);
 	if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) {
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-common.c b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
index dfc8096..2e50c4f 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-common.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
@@ -385,8 +385,30 @@ void rtc6226_scan(struct work_struct *work)
 			next_freq_khz, radio->registers[RSSI] & RSSI_RSSI);
 
 		if (radio->registers[STATUS] & STATUS_SF) {
-			FMDERR("%s band limit reached. Seek one more.\n",
+			FMDERR("%s Seek one more time if lower freq is valid\n",
 					__func__);
+			retval = rtc6226_set_seek(radio, SRCH_UP, WRAP_ENABLE);
+			if (retval < 0) {
+				FMDERR("%s seek fail %d\n", __func__, retval);
+				goto seek_tune_fail;
+			}
+			if (!wait_for_completion_timeout(&radio->completion,
+					msecs_to_jiffies(WAIT_TIMEOUT_MSEC))) {
+				FMDERR("timeout didn't receive STC for seek\n");
+			} else {
+				FMDERR("%s: received STC for seek\n", __func__);
+				retval = rtc6226_get_freq(radio,
+						&next_freq_khz);
+				if (retval < 0) {
+					FMDERR("%s getFreq failed\n", __func__);
+					goto seek_tune_fail;
+				}
+				if ((radio->recv_conf.band_low_limit *
+						TUNE_STEP_SIZE) ==
+							next_freq_khz)
+					rtc6226_q_event(radio,
+						RTC6226_EVT_TUNE_SUCC);
+			}
 			break;
 		}
 		if (radio->g_search_mode == SCAN)
@@ -438,13 +460,12 @@ void rtc6226_scan(struct work_struct *work)
 		if (!wait_for_completion_timeout(&radio->completion,
 			msecs_to_jiffies(WAIT_TIMEOUT_MSEC)))
 			FMDERR("%s: didn't receive STD for tune\n", __func__);
-		else {
+		else
 			FMDERR("%s: received STD for tune\n", __func__);
-			rtc6226_q_event(radio, RTC6226_EVT_TUNE_SUCC);
-		}
 	}
 seek_cancelled:
 	rtc6226_q_event(radio, RTC6226_EVT_SEEK_COMPLETE);
+	rtc6226_q_event(radio, RTC6226_EVT_TUNE_SUCC);
 	radio->seek_tune_status = NO_SEEK_TUNE_PENDING;
 	FMDERR("%s seek cancelled %d\n", __func__, retval);
 	return;
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c b/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
index f4e62c1..4ea5011 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
@@ -523,13 +523,9 @@ static int rtc6226_fm_power_cfg(struct rtc6226_device *radio, bool powerflag)
 int rtc6226_fops_open(struct file *file)
 {
 	struct rtc6226_device *radio = video_drvdata(file);
-	int retval = v4l2_fh_open(file);
+	int retval;
 
 	FMDBG("%s enter user num = %d\n", __func__, radio->users);
-	if (retval) {
-		FMDERR("%s fail to open v4l2\n", __func__);
-		return retval;
-	}
 	if (atomic_inc_return(&radio->users) != 1) {
 		FMDERR("Device already in use. Try again later\n");
 		atomic_dec(&radio->users);
@@ -560,7 +556,6 @@ int rtc6226_fops_open(struct file *file)
 	rtc6226_fm_power_cfg(radio, TURNING_OFF);
 open_err_setup:
 	atomic_dec(&radio->users);
-	v4l2_fh_release(file);
 	return retval;
 }
 
@@ -573,18 +568,16 @@ int rtc6226_fops_release(struct file *file)
 	int retval = 0;
 
 	FMDBG("%s : Exit\n", __func__);
-	if (v4l2_fh_is_singular_file(file)) {
-		if (radio->mode != FM_OFF) {
-			rtc6226_power_down(radio);
-			radio->mode = FM_OFF;
-		}
+	if (radio->mode != FM_OFF) {
+		rtc6226_power_down(radio);
+		radio->mode = FM_OFF;
 	}
 	rtc6226_disable_irq(radio);
 	atomic_dec(&radio->users);
 	retval = rtc6226_fm_power_cfg(radio, TURNING_OFF);
 	if (retval < 0)
 		FMDERR("%s: failed to apply voltage\n", __func__);
-	return v4l2_fh_release(file);
+	return retval;
 }
 
 static int rtc6226_parse_dt(struct device *dev,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 02b5509..9d097b8 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -163,6 +163,8 @@
 
 #define INVALID_TUNING_PHASE	-1
 #define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
+#define sdhci_is_valid_gpio_testbus_trigger_int(_h) \
+	((_h)->pdata->testbus_trigger_irq >= 0)
 
 #define NUM_TUNING_PHASES		16
 #define MAX_DRV_TYPES_SUPPORTED_HS200	4
@@ -1210,7 +1212,115 @@ static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
 			drv_type);
 }
 
+#define MAX_TESTBUS 127
 #define IPCAT_MINOR_MASK(val) ((val & 0x0fff0000) >> 0x10)
+#define TB_CONF_MASK 0x7f
+#define TB_TRIG_CONF 0xff80ffff
+#define TB_WRITE_STATUS BIT(8)
+
+/*
+ * This function needs to be used when getting mask and
+ * match pattern either from cmdline or sysfs
+ */
+void sdhci_msm_mm_dbg_configure(struct sdhci_host *host, u32 mask,
+			u32 match, u32 bit_shift, u32 testbus)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct platform_device *pdev = msm_host->pdev;
+	u32 val;
+	u32 enable_dbg_feature = 0;
+	int ret = 0;
+
+	if (testbus > MAX_TESTBUS) {
+		dev_err(&pdev->dev, "%s: testbus should be less than 128.\n",
+						__func__);
+		return;
+	}
+
+	/* Enable debug mode */
+	writel_relaxed(ENABLE_DBG,
+			host->ioaddr + SDCC_TESTBUS_CONFIG);
+	writel_relaxed(DUMMY,
+			host->ioaddr + SDCC_DEBUG_EN_DIS_REG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			SDCC_TESTBUS_CONFIG) | TESTBUS_EN),
+			host->ioaddr + SDCC_TESTBUS_CONFIG);
+
+	/* Enable particular feature */
+	enable_dbg_feature |= MM_TRIGGER_DISABLE;
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			SDCC_DEBUG_FEATURE_CFG_REG) | enable_dbg_feature),
+			host->ioaddr + SDCC_DEBUG_FEATURE_CFG_REG);
+
+	/* Configure Mask & Match pattern*/
+	writel_relaxed((mask << bit_shift),
+			host->ioaddr + SDCC_DEBUG_MASK_PATTERN_REG);
+	writel_relaxed((match << bit_shift),
+			host->ioaddr + SDCC_DEBUG_MATCH_PATTERN_REG);
+
+	/* Configure test bus for above mm */
+	writel_relaxed((testbus & TB_CONF_MASK), host->ioaddr +
+			SDCC_DEBUG_MM_TB_CFG_REG);
+	/* Initiate conf shifting */
+	writel_relaxed(BIT(8),
+			host->ioaddr + SDCC_DEBUG_MM_TB_CFG_REG);
+
+	/* Wait for test bus to be configured */
+	ret = readl_poll_timeout(host->ioaddr + SDCC_DEBUG_MM_TB_CFG_REG,
+			val, !(val & TB_WRITE_STATUS), 50, 1000);
+	if (ret == -ETIMEDOUT)
+		pr_err("%s: Unable to set mask & match\n",
+				mmc_hostname(host->mmc));
+
+	/* Direct test bus to GPIO */
+	writel_relaxed(((readl_relaxed(host->ioaddr +
+				SDCC_TESTBUS_CONFIG) & TB_TRIG_CONF)
+				| (testbus << 16)), host->ioaddr +
+				SDCC_TESTBUS_CONFIG);
+
+	/* Read back to ensure write went through */
+	readl_relaxed(host->ioaddr + SDCC_DEBUG_FEATURE_CFG_REG);
+}
+
+static ssize_t store_mask_and_match(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	unsigned long value;
+	char *token;
+	int i = 0;
+	u32 mask, match, bit_shift, testbus;
+
+	char *temp = (char *)buf;
+
+	if (!host)
+		return -EINVAL;
+
+	while ((token = strsep(&temp, " "))) {
+		kstrtoul(token, 0, &value);
+		if (i == 0)
+			mask = value;
+		else if (i == 1)
+			match = value;
+		else if (i == 2)
+			bit_shift = value;
+		else if (i == 3) {
+			testbus = value;
+			break;
+		}
+		i++;
+	}
+
+	pr_info("%s: M&M parameter passed are: %d %d %d %d\n",
+		mmc_hostname(host->mmc), mask, match, bit_shift, testbus);
+	pm_runtime_get_sync(dev);
+	sdhci_msm_mm_dbg_configure(host, mask, match, bit_shift, testbus);
+	pm_runtime_put_sync(dev);
+
+	pr_debug("%s: M&M debug enabled.\n", mmc_hostname(host->mmc));
+	return count;
+}
 
 /* Enter sdcc debug mode */
 void sdhci_msm_enter_dbg_mode(struct sdhci_host *host)
@@ -2861,6 +2971,16 @@ static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
 	msm_host->is_sdiowakeup_enabled = enable;
 }
 
+static irqreturn_t sdhci_msm_testbus_trigger_irq(int irq, void *data)
+{
+	struct sdhci_host *host = (struct sdhci_host *)data;
+
+	pr_info("%s: match happened against mask\n",
+				mmc_hostname(host->mmc));
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
 {
 	struct sdhci_host *host = (struct sdhci_host *)data;
@@ -5564,6 +5684,22 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 		}
 	}
 
+	msm_host->pdata->testbus_trigger_irq = platform_get_irq_byname(pdev,
+							  "tb_trig_irq");
+	if (sdhci_is_valid_gpio_testbus_trigger_int(msm_host)) {
+		dev_info(&pdev->dev, "%s: testbus_trigger_irq = %d\n", __func__,
+				msm_host->pdata->testbus_trigger_irq);
+		ret = request_irq(msm_host->pdata->testbus_trigger_irq,
+				  sdhci_msm_testbus_trigger_irq,
+				  IRQF_SHARED | IRQF_TRIGGER_RISING,
+				  "sdhci-msm tb_trig", host);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: request tb_trig IRQ %d: failed: %d\n",
+				__func__, msm_host->pdata->testbus_trigger_irq,
+				ret);
+		}
+	}
+
 	if (of_device_is_compatible(node, "qcom,sdhci-msm-cqe")) {
 		dev_dbg(&pdev->dev, "node with qcom,sdhci-msm-cqe\n");
 		ret = sdhci_msm_cqe_add_host(host, pdev);
@@ -5622,6 +5758,20 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 		device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
 	}
 
+	if (IPCAT_MINOR_MASK(readl_relaxed(host->ioaddr +
+				SDCC_IP_CATALOG)) >= 2) {
+		msm_host->mask_and_match.store = store_mask_and_match;
+		sysfs_attr_init(&msm_host->mask_and_match.attr);
+		msm_host->mask_and_match.attr.name = "mask_and_match";
+		msm_host->mask_and_match.attr.mode = 0644;
+		ret = device_create_file(&pdev->dev,
+					&msm_host->mask_and_match);
+		if (ret) {
+			pr_err("%s: %s: failed creating M&M attr: %d\n",
+					mmc_hostname(host->mmc), __func__, ret);
+		}
+	}
+
 	if (sdhci_msm_is_bootdevice(&pdev->dev))
 		mmc_flush_detect_work(host->mmc);
 	/* Successful initialization */
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index fa83f09..026faae 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -207,6 +207,7 @@ struct sdhci_msm_pltfm_data {
 	u32 *sup_clk_table;
 	unsigned char sup_clk_cnt;
 	int sdiowakeup_irq;
+	int testbus_trigger_irq;
 	struct sdhci_msm_pm_qos_data pm_qos_data;
 	u32 *bus_clk_table;
 	unsigned char bus_clk_cnt;
@@ -288,6 +289,7 @@ struct sdhci_msm_host {
 	struct completion pwr_irq_completion;
 	struct sdhci_msm_bus_vote msm_bus_vote;
 	struct device_attribute	polling;
+	struct device_attribute mask_and_match;
 	u32 clk_rate; /* Keeps track of current clock rate that is set */
 	bool tuning_done;
 	bool calibration_done;
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 21ceda6..9002866 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -879,12 +879,6 @@ static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
 
 	return 0;
 }
-#else
-static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
-{
-	return 0;
-}
-#endif
 
 int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
 {
@@ -910,6 +904,12 @@ int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
 out:
 	return ret;
 }
+#else
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+#endif
 
 void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
 {
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 280cc9e..7bb0bc03 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -665,7 +665,10 @@ int cnss_idle_restart(struct device *dev)
 		return -ENODEV;
 	}
 
-	mutex_lock(&plat_priv->driver_ops_lock);
+	if (!mutex_trylock(&plat_priv->driver_ops_lock)) {
+		cnss_pr_dbg("Another driver operation is in progress, ignore idle restart\n");
+		return -EBUSY;
+	}
 
 	cnss_pr_dbg("Doing idle restart\n");
 
@@ -2308,9 +2311,7 @@ static int cnss_probe(struct platform_device *plat_dev)
 	if (ret)
 		goto deinit_event_work;
 
-	ret = cnss_debugfs_create(plat_priv);
-	if (ret)
-		goto deinit_qmi;
+	cnss_debugfs_create(plat_priv);
 
 	ret = cnss_misc_init(plat_priv);
 	if (ret)
@@ -2329,7 +2330,6 @@ static int cnss_probe(struct platform_device *plat_dev)
 
 destroy_debugfs:
 	cnss_debugfs_destroy(plat_priv);
-deinit_qmi:
 	cnss_qmi_deinit(plat_priv);
 deinit_event_work:
 	cnss_event_work_deinit(plat_priv);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 79986c2..f2f8560 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -2076,6 +2076,12 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
 		return -EEXIST;
 	}
 
+	if (!driver_ops->id_table || !pci_dev_present(driver_ops->id_table)) {
+		cnss_pr_err("PCIe device id is %x, not supported by loading driver\n",
+			    pci_priv->device_id);
+		return -ENODEV;
+	}
+
 	if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
 		goto register_driver;
 
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index dc6efb1..e773c73 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -201,4 +201,12 @@
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
 	  Technologies Inc SCUBA platform.
 
+config PINCTRL_SDM660
+	tristate "Qualcomm Technologies, Inc SDM660 pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	 This is the pinctrl, pinmux, pinconf and gpiolib driver for
+	 the Qualcomm Technologies Inc TLMM block found in the
+	 Qualcomm Technologies, Inc. SDM660 platform.
 endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 996270d..e178e71 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -25,3 +25,4 @@
 obj-$(CONFIG_PINCTRL_BENGAL) += pinctrl-bengal.o
 obj-$(CONFIG_PINCTRL_LAGOON) += pinctrl-lagoon.o
 obj-$(CONFIG_PINCTRL_SCUBA) += pinctrl-scuba.o
+obj-$(CONFIG_PINCTRL_SDM660)   += pinctrl-sdm660.o
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
new file mode 100644
index 0000000..2d900b4
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -0,0 +1,1750 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)					\
+	[msm_mux_##fname] = {		                \
+		.name = #fname,				\
+		.groups = fname##_groups,               \
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
+#define NORTH	0x00900000
+#define CENTER	0x00500000
+#define SOUTH	0x00100000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+	{					        \
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9			\
+		},				        \
+		.nfuncs = 10,				\
+		.ctl_reg = base + REG_SIZE * id,	\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,		\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,	\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+	}
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)			\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+static const struct pinctrl_pin_desc sdm660_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(104, "GPIO_104"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "GPIO_113"),
+	PINCTRL_PIN(114, "SDC1_CLK"),
+	PINCTRL_PIN(115, "SDC1_CMD"),
+	PINCTRL_PIN(116, "SDC1_DATA"),
+	PINCTRL_PIN(117, "SDC2_CLK"),
+	PINCTRL_PIN(118, "SDC2_CMD"),
+	PINCTRL_PIN(119, "SDC2_DATA"),
+	PINCTRL_PIN(120, "SDC1_RCLK"),
+	PINCTRL_PIN(121, "UFS_RESET"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+
+static const unsigned int sdc1_clk_pins[] = { 114 };
+static const unsigned int sdc1_cmd_pins[] = { 115 };
+static const unsigned int sdc1_data_pins[] = { 116 };
+static const unsigned int sdc2_clk_pins[] = { 117 };
+static const unsigned int sdc2_cmd_pins[] = { 118 };
+static const unsigned int sdc2_data_pins[] = { 119 };
+static const unsigned int sdc1_rclk_pins[] = { 120 };
+static const unsigned int ufs_reset_pins[] = { 121 };
+
+enum sdm660_functions {
+	msm_mux_blsp_spi1,
+	msm_mux_gpio,
+	msm_mux_blsp_uim1,
+	msm_mux_tgu_ch0,
+	msm_mux_qdss_gpio4,
+	msm_mux_atest_gpsadc1,
+	msm_mux_blsp_uart1,
+	msm_mux_SMB_STAT,
+	msm_mux_phase_flag14,
+	msm_mux_blsp_i2c2,
+	msm_mux_phase_flag31,
+	msm_mux_blsp_spi3,
+	msm_mux_blsp_spi3_cs1,
+	msm_mux_blsp_spi3_cs2,
+	msm_mux_wlan1_adc1,
+	msm_mux_atest_usb13,
+	msm_mux_tgu_ch1,
+	msm_mux_qdss_gpio5,
+	msm_mux_atest_gpsadc0,
+	msm_mux_blsp_i2c1,
+	msm_mux_ddr_bist,
+	msm_mux_atest_tsens2,
+	msm_mux_atest_usb1,
+	msm_mux_blsp_spi2,
+	msm_mux_blsp_uim2,
+	msm_mux_phase_flag3,
+	msm_mux_bimc_dte1,
+	msm_mux_wlan1_adc0,
+	msm_mux_atest_usb12,
+	msm_mux_bimc_dte0,
+	msm_mux_blsp_i2c3,
+	msm_mux_wlan2_adc1,
+	msm_mux_atest_usb11,
+	msm_mux_dbg_out,
+	msm_mux_wlan2_adc0,
+	msm_mux_atest_usb10,
+	msm_mux_RCM_MARKER,
+	msm_mux_blsp_spi4,
+	msm_mux_pri_mi2s,
+	msm_mux_phase_flag26,
+	msm_mux_qdss_cti0_a,
+	msm_mux_qdss_cti0_b,
+	msm_mux_qdss_cti1_a,
+	msm_mux_qdss_cti1_b,
+	msm_mux_DP_HOT,
+	msm_mux_pri_mi2s_ws,
+	msm_mux_phase_flag27,
+	msm_mux_blsp_i2c4,
+	msm_mux_phase_flag28,
+	msm_mux_blsp_uart5,
+	msm_mux_blsp_spi5,
+	msm_mux_blsp_uim5,
+	msm_mux_phase_flag5,
+	msm_mux_blsp_i2c5,
+	msm_mux_blsp_spi6,
+	msm_mux_blsp_uart2,
+	msm_mux_blsp_uim6,
+	msm_mux_phase_flag11,
+	msm_mux_vsense_data0,
+	msm_mux_blsp_i2c6,
+	msm_mux_phase_flag12,
+	msm_mux_vsense_data1,
+	msm_mux_phase_flag13,
+	msm_mux_vsense_mode,
+	msm_mux_blsp_spi7,
+	msm_mux_blsp_uart6_a,
+	msm_mux_blsp_uart6_b,
+	msm_mux_sec_mi2s,
+	msm_mux_sndwire_clk,
+	msm_mux_phase_flag17,
+	msm_mux_vsense_clkout,
+	msm_mux_sndwire_data,
+	msm_mux_phase_flag18,
+	msm_mux_WSA_SPKR,
+	msm_mux_blsp_i2c7,
+	msm_mux_phase_flag19,
+	msm_mux_vfr_1,
+	msm_mux_phase_flag20,
+	msm_mux_NFC_INT,
+	msm_mux_blsp_spi8_cs1,
+	msm_mux_blsp_spi8_cs2,
+	msm_mux_m_voc,
+	msm_mux_phase_flag21,
+	msm_mux_NFC_EN,
+	msm_mux_phase_flag22,
+	msm_mux_NFC_DWL,
+	msm_mux_blsp_i2c8_a,
+	msm_mux_blsp_i2c8_b,
+	msm_mux_phase_flag23,
+	msm_mux_NFC_ESE,
+	msm_mux_pwr_modem,
+	msm_mux_phase_flag24,
+	msm_mux_qdss_gpio,
+	msm_mux_cam_mclk,
+	msm_mux_pwr_nav,
+	msm_mux_qdss_gpio0,
+	msm_mux_qspi_data0,
+	msm_mux_pwr_crypto,
+	msm_mux_qdss_gpio1,
+	msm_mux_qspi_data1,
+	msm_mux_agera_pll,
+	msm_mux_qdss_gpio2,
+	msm_mux_qspi_data2,
+	msm_mux_jitter_bist,
+	msm_mux_qdss_gpio3,
+	msm_mux_qdss_gpio7,
+	msm_mux_FL_R3LED,
+	msm_mux_CCI_TIMER0,
+	msm_mux_FL_STROBE,
+	msm_mux_CCI_TIMER1,
+	msm_mux_CAM_LDO1,
+	msm_mux_mdss_vsync0,
+	msm_mux_mdss_vsync1,
+	msm_mux_mdss_vsync2,
+	msm_mux_mdss_vsync3,
+	msm_mux_qdss_gpio9,
+	msm_mux_CAM_IRQ,
+	msm_mux_atest_usb2,
+	msm_mux_cci_i2c,
+	msm_mux_pll_bypassnl,
+	msm_mux_atest_tsens,
+	msm_mux_atest_usb21,
+	msm_mux_pll_reset,
+	msm_mux_atest_usb23,
+	msm_mux_qdss_gpio6,
+	msm_mux_CCI_TIMER3,
+	msm_mux_CCI_ASYNC,
+	msm_mux_qspi_cs,
+	msm_mux_qdss_gpio10,
+	msm_mux_CAM3_STANDBY,
+	msm_mux_CCI_TIMER4,
+	msm_mux_qdss_gpio11,
+	msm_mux_CAM_LDO2,
+	msm_mux_cci_async,
+	msm_mux_qdss_gpio12,
+	msm_mux_CAM0_RST,
+	msm_mux_qdss_gpio13,
+	msm_mux_CAM1_RST,
+	msm_mux_qspi_clk,
+	msm_mux_phase_flag30,
+	msm_mux_qdss_gpio14,
+	msm_mux_qspi_resetn,
+	msm_mux_phase_flag1,
+	msm_mux_qdss_gpio15,
+	msm_mux_CAM0_STANDBY,
+	msm_mux_phase_flag2,
+	msm_mux_CAM1_STANDBY,
+	msm_mux_phase_flag9,
+	msm_mux_CAM2_STANDBY,
+	msm_mux_qspi_data3,
+	msm_mux_phase_flag15,
+	msm_mux_qdss_gpio8,
+	msm_mux_CAM3_RST,
+	msm_mux_CCI_TIMER2,
+	msm_mux_phase_flag16,
+	msm_mux_LCD0_RESET,
+	msm_mux_phase_flag6,
+	msm_mux_SD_CARD,
+	msm_mux_phase_flag29,
+	msm_mux_DP_EN,
+	msm_mux_phase_flag25,
+	msm_mux_USBC_ORIENTATION,
+	msm_mux_phase_flag10,
+	msm_mux_atest_usb20,
+	msm_mux_gcc_gp1,
+	msm_mux_phase_flag4,
+	msm_mux_atest_usb22,
+	msm_mux_USB_PHY,
+	msm_mux_gcc_gp2,
+	msm_mux_atest_char,
+	msm_mux_mdp_vsync,
+	msm_mux_gcc_gp3,
+	msm_mux_atest_char3,
+	msm_mux_FORCE_TOUCH,
+	msm_mux_cri_trng0,
+	msm_mux_atest_char2,
+	msm_mux_cri_trng1,
+	msm_mux_atest_char1,
+	msm_mux_AUDIO_USBC,
+	msm_mux_audio_ref,
+	msm_mux_MDP_VSYNC,
+	msm_mux_cri_trng,
+	msm_mux_atest_char0,
+	msm_mux_US_EURO,
+	msm_mux_LCD_BACKLIGHT,
+	msm_mux_blsp_spi8_a,
+	msm_mux_blsp_spi8_b,
+	msm_mux_sp_cmu,
+	msm_mux_nav_pps_a,
+	msm_mux_nav_pps_b,
+	msm_mux_nav_pps_c,
+	msm_mux_gps_tx_a,
+	msm_mux_gps_tx_b,
+	msm_mux_gps_tx_c,
+	msm_mux_adsp_ext,
+	msm_mux_TS_RESET,
+	msm_mux_ssc_irq,
+	msm_mux_isense_dbg,
+	msm_mux_phase_flag0,
+	msm_mux_phase_flag7,
+	msm_mux_phase_flag8,
+	msm_mux_tsense_pwm1,
+	msm_mux_tsense_pwm2,
+	msm_mux_SENSOR_RST,
+	msm_mux_WMSS_RESETN,
+	msm_mux_HAPTICS_PWM,
+	msm_mux_GPS_eLNA,
+	msm_mux_mss_lte,
+	msm_mux_uim2_data,
+	msm_mux_uim2_clk,
+	msm_mux_uim2_reset,
+	msm_mux_uim2_present,
+	msm_mux_uim1_data,
+	msm_mux_uim1_clk,
+	msm_mux_uim1_reset,
+	msm_mux_uim1_present,
+	msm_mux_uim_batt,
+	msm_mux_pa_indicator,
+	msm_mux_ldo_en,
+	msm_mux_ldo_update,
+	msm_mux_qlink_request,
+	msm_mux_qlink_enable,
+	msm_mux_prng_rosc,
+	msm_mux_LCD_PWR,
+	msm_mux_NA,
+};
+
+static const char * const blsp_spi1_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio46",
+};
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113",
+};
+static const char * const blsp_uim1_groups[] = {
+	"gpio0", "gpio1",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio0",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio0", "gpio36",
+};
+static const char * const atest_gpsadc1_groups[] = {
+	"gpio0",
+};
+static const char * const blsp_uart1_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const SMB_STAT_groups[] = {
+	"gpio5",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio5",
+};
+static const char * const blsp_i2c2_groups[] = {
+	"gpio6", "gpio7",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio6",
+};
+static const char * const blsp_spi3_groups[] = {
+	"gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const blsp_spi3_cs1_groups[] = {
+	"gpio30",
+};
+static const char * const blsp_spi3_cs2_groups[] = {
+	"gpio65",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio8",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio8",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio1",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio1", "gpio37",
+};
+static const char * const atest_gpsadc0_groups[] = {
+	"gpio1",
+};
+static const char * const blsp_i2c1_groups[] = {
+	"gpio2", "gpio3",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio3", "gpio8", "gpio9", "gpio10",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio3",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uim2_groups[] = {
+	"gpio4", "gpio5",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio4",
+};
+static const char * const bimc_dte1_groups[] = {
+	"gpio8", "gpio10",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio9",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio9",
+};
+static const char * const bimc_dte0_groups[] = {
+	"gpio9", "gpio11",
+};
+static const char * const blsp_i2c3_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const wlan2_adc1_groups[] = {
+	"gpio10",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio10",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio11",
+};
+static const char * const wlan2_adc0_groups[] = {
+	"gpio11",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio11",
+};
+static const char * const RCM_MARKER_groups[] = {
+	"gpio12", "gpio13",
+};
+static const char * const blsp_spi4_groups[] = {
+	"gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const pri_mi2s_groups[] = {
+	"gpio12", "gpio14", "gpio15", "gpio61",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio12",
+};
+static const char * const qdss_cti0_a_groups[] = {
+	"gpio49", "gpio50",
+};
+static const char * const qdss_cti0_b_groups[] = {
+	"gpio13", "gpio21",
+};
+static const char * const qdss_cti1_a_groups[] = {
+	"gpio53", "gpio55",
+};
+static const char * const qdss_cti1_b_groups[] = {
+	"gpio12", "gpio66",
+};
+static const char * const DP_HOT_groups[] = {
+	"gpio13",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+	"gpio13",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio13",
+};
+static const char * const blsp_i2c4_groups[] = {
+	"gpio14", "gpio15",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio14",
+};
+static const char * const blsp_uart5_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_spi5_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_uim5_groups[] = {
+	"gpio16", "gpio17",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio17",
+};
+static const char * const blsp_i2c5_groups[] = {
+	"gpio18", "gpio19",
+};
+static const char * const blsp_spi6_groups[] = {
+	"gpio49", "gpio52", "gpio22", "gpio23",
+};
+static const char * const blsp_uart2_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uim6_groups[] = {
+	"gpio20", "gpio21",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio21",
+};
+static const char * const vsense_data0_groups[] = {
+	"gpio21",
+};
+static const char * const blsp_i2c6_groups[] = {
+	"gpio22", "gpio23",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio22",
+};
+static const char * const vsense_data1_groups[] = {
+	"gpio22",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio23",
+};
+static const char * const vsense_mode_groups[] = {
+	"gpio23",
+};
+static const char * const blsp_spi7_groups[] = {
+	"gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const blsp_uart6_a_groups[] = {
+	"gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const blsp_uart6_b_groups[] = {
+	"gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const sec_mi2s_groups[] = {
+	"gpio24", "gpio25", "gpio26", "gpio27", "gpio62",
+};
+static const char * const sndwire_clk_groups[] = {
+	"gpio24",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio24",
+};
+static const char * const vsense_clkout_groups[] = {
+	"gpio24",
+};
+static const char * const sndwire_data_groups[] = {
+	"gpio25",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio25",
+};
+static const char * const WSA_SPKR_groups[] = {
+	"gpio26", "gpio27",
+};
+static const char * const blsp_i2c7_groups[] = {
+	"gpio26", "gpio27",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio26",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio27",
+};
+static const char * const phase_flag20_groups[] = {
+	"gpio27",
+};
+static const char * const NFC_INT_groups[] = {
+	"gpio28",
+};
+static const char * const blsp_spi8_a_groups[] = {
+	"gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const blsp_spi8_b_groups[] = {
+	"gpio40", "gpio41", "gpio44", "gpio52",
+};
+static const char * const m_voc_groups[] = {
+	"gpio28",
+};
+static const char * const phase_flag21_groups[] = {
+	"gpio28",
+};
+static const char * const NFC_EN_groups[] = {
+	"gpio29",
+};
+static const char * const phase_flag22_groups[] = {
+	"gpio29",
+};
+static const char * const NFC_DWL_groups[] = {
+	"gpio30",
+};
+static const char * const blsp_i2c8_a_groups[] = {
+	"gpio30", "gpio31",
+};
+static const char * const blsp_i2c8_b_groups[] = {
+	"gpio44", "gpio52",
+};
+static const char * const phase_flag23_groups[] = {
+	"gpio30",
+};
+static const char * const NFC_ESE_groups[] = {
+	"gpio31",
+};
+static const char * const pwr_modem_groups[] = {
+	"gpio31",
+};
+static const char * const phase_flag24_groups[] = {
+	"gpio31",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio31", "gpio52", "gpio68", "gpio69",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio32", "gpio33", "gpio34", "gpio35",
+};
+static const char * const pwr_nav_groups[] = {
+	"gpio32",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio32", "gpio67",
+};
+static const char * const qspi_data0_groups[] = {
+	"gpio33",
+};
+static const char * const pwr_crypto_groups[] = {
+	"gpio33",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio33", "gpio63",
+};
+static const char * const qspi_data1_groups[] = {
+	"gpio34",
+};
+static const char * const agera_pll_groups[] = {
+	"gpio34", "gpio36",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio34", "gpio64",
+};
+static const char * const qspi_data2_groups[] = {
+	"gpio35",
+};
+static const char * const jitter_bist_groups[] = {
+	"gpio35",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio35", "gpio56",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio39", "gpio71",
+};
+static const char * const FL_R3LED_groups[] = {
+	"gpio40",
+};
+static const char * const CCI_TIMER0_groups[] = {
+	"gpio40",
+};
+static const char * const FL_STROBE_groups[] = {
+	"gpio41",
+};
+static const char * const CCI_TIMER1_groups[] = {
+	"gpio41",
+};
+static const char * const CAM_LDO1_groups[] = {
+	"gpio42",
+};
+static const char * const mdss_vsync0_groups[] = {
+	"gpio42",
+};
+static const char * const mdss_vsync1_groups[] = {
+	"gpio42",
+};
+static const char * const mdss_vsync2_groups[] = {
+	"gpio42",
+};
+static const char * const mdss_vsync3_groups[] = {
+	"gpio42",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio42", "gpio76",
+};
+static const char * const CAM_IRQ_groups[] = {
+	"gpio43",
+};
+static const char * const atest_usb2_groups[] = {
+	"gpio35",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio36",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio36",
+};
+static const char * const atest_usb21_groups[] = {
+	"gpio36",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio37",
+};
+static const char * const atest_usb23_groups[] = {
+	"gpio37",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio38", "gpio70",
+};
+static const char * const CCI_TIMER3_groups[] = {
+	"gpio43",
+};
+static const char * const CCI_ASYNC_groups[] = {
+	"gpio43", "gpio44",
+};
+static const char * const qspi_cs_groups[] = {
+	"gpio43", "gpio50",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio43", "gpio77",
+};
+static const char * const CAM3_STANDBY_groups[] = {
+	"gpio44",
+};
+static const char * const CCI_TIMER4_groups[] = {
+	"gpio44",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio44", "gpio79",
+};
+static const char * const CAM_LDO2_groups[] = {
+	"gpio45",
+};
+static const char * const cci_async_groups[] = {
+	"gpio45",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio45", "gpio80",
+};
+static const char * const CAM0_RST_groups[] = {
+	"gpio46",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio46", "gpio78",
+};
+static const char * const CAM1_RST_groups[] = {
+	"gpio47",
+};
+static const char * const qspi_clk_groups[] = {
+	"gpio47",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio47",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio47", "gpio72",
+};
+static const char * const qspi_resetn_groups[] = {
+	"gpio48",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio48",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio48", "gpio73",
+};
+static const char * const CAM0_STANDBY_groups[] = {
+	"gpio49",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio49",
+};
+static const char * const CAM1_STANDBY_groups[] = {
+	"gpio50",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio50",
+};
+static const char * const CAM2_STANDBY_groups[] = {
+	"gpio51",
+};
+static const char * const qspi_data3_groups[] = {
+	"gpio51",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio51",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio51", "gpio75",
+};
+static const char * const CAM3_RST_groups[] = {
+	"gpio52",
+};
+static const char * const CCI_TIMER2_groups[] = {
+	"gpio52",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio52",
+};
+static const char * const LCD0_RESET_groups[] = {
+	"gpio53",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio53",
+};
+static const char * const SD_CARD_groups[] = {
+	"gpio54",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio54",
+};
+static const char * const DP_EN_groups[] = {
+	"gpio55",
+};
+static const char * const phase_flag25_groups[] = {
+	"gpio55",
+};
+static const char * const USBC_ORIENTATION_groups[] = {
+	"gpio56",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio56",
+};
+static const char * const atest_usb20_groups[] = {
+	"gpio56",
+};
+static const char * const gcc_gp1_groups[] = {
+	"gpio57", "gpio78",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio57",
+};
+static const char * const atest_usb22_groups[] = {
+	"gpio57",
+};
+static const char * const USB_PHY_groups[] = {
+	"gpio58",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio58", "gpio81",
+};
+static const char * const atest_char_groups[] = {
+	"gpio58",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio59", "gpio74",
+};
+static const char * const gcc_gp3_groups[] = {
+	"gpio59", "gpio82",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio59",
+};
+static const char * const FORCE_TOUCH_groups[] = {
+	"gpio60", "gpio73",
+};
+static const char * const cri_trng0_groups[] = {
+	"gpio60",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio60",
+};
+static const char * const cri_trng1_groups[] = {
+	"gpio61",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio61",
+};
+static const char * const AUDIO_USBC_groups[] = {
+	"gpio62",
+};
+static const char * const audio_ref_groups[] = {
+	"gpio62",
+};
+static const char * const MDP_VSYNC_groups[] = {
+	"gpio62",
+};
+static const char * const cri_trng_groups[] = {
+	"gpio62",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio62",
+};
+static const char * const US_EURO_groups[] = {
+	"gpio63",
+};
+static const char * const LCD_BACKLIGHT_groups[] = {
+	"gpio64",
+};
+static const char * const blsp_spi8_cs1_groups[] = {
+	"gpio64",
+};
+static const char * const blsp_spi8_cs2_groups[] = {
+	"gpio76",
+};
+static const char * const sp_cmu_groups[] = {
+	"gpio64",
+};
+static const char * const nav_pps_a_groups[] = {
+	"gpio65",
+};
+static const char * const nav_pps_b_groups[] = {
+	"gpio98",
+};
+static const char * const nav_pps_c_groups[] = {
+	"gpio80",
+};
+static const char * const gps_tx_a_groups[] = {
+	"gpio65",
+};
+static const char * const gps_tx_b_groups[] = {
+	"gpio98",
+};
+static const char * const gps_tx_c_groups[] = {
+	"gpio80",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio65",
+};
+static const char * const TS_RESET_groups[] = {
+	"gpio66",
+};
+static const char * const ssc_irq_groups[] = {
+	"gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio74",
+	"gpio75", "gpio76",
+};
+static const char * const isense_dbg_groups[] = {
+	"gpio68",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio68",
+};
+static const char * const phase_flag7_groups[] = {
+	"gpio69",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio70",
+};
+static const char * const tsense_pwm1_groups[] = {
+	"gpio71",
+};
+static const char * const tsense_pwm2_groups[] = {
+	"gpio71",
+};
+static const char * const SENSOR_RST_groups[] = {
+	"gpio77",
+};
+static const char * const WMSS_RESETN_groups[] = {
+	"gpio78",
+};
+static const char * const HAPTICS_PWM_groups[] = {
+	"gpio79",
+};
+static const char * const GPS_eLNA_groups[] = {
+	"gpio80",
+};
+static const char * const mss_lte_groups[] = {
+	"gpio81", "gpio82",
+};
+static const char * const uim2_data_groups[] = {
+	"gpio83",
+};
+static const char * const uim2_clk_groups[] = {
+	"gpio84",
+};
+static const char * const uim2_reset_groups[] = {
+	"gpio85",
+};
+static const char * const uim2_present_groups[] = {
+	"gpio86",
+};
+static const char * const uim1_data_groups[] = {
+	"gpio87",
+};
+static const char * const uim1_clk_groups[] = {
+	"gpio88",
+};
+static const char * const uim1_reset_groups[] = {
+	"gpio89",
+};
+static const char * const uim1_present_groups[] = {
+	"gpio90",
+};
+static const char * const uim_batt_groups[] = {
+	"gpio91",
+};
+static const char * const pa_indicator_groups[] = {
+	"gpio92",
+};
+static const char * const ldo_en_groups[] = {
+	"gpio97",
+};
+static const char * const ldo_update_groups[] = {
+	"gpio98",
+};
+static const char * const qlink_request_groups[] = {
+	"gpio99",
+};
+static const char * const qlink_enable_groups[] = {
+	"gpio100",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio102",
+};
+static const char * const LCD_PWR_groups[] = {
+	"gpio113",
+};
+
+static const struct msm_function sdm660_functions[] = {
+	FUNCTION(blsp_spi1),
+	FUNCTION(gpio),
+	FUNCTION(blsp_uim1),
+	FUNCTION(tgu_ch0),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(atest_gpsadc1),
+	FUNCTION(blsp_uart1),
+	FUNCTION(SMB_STAT),
+	FUNCTION(phase_flag14),
+	FUNCTION(blsp_i2c2),
+	FUNCTION(phase_flag31),
+	FUNCTION(blsp_spi3),
+	FUNCTION(blsp_spi3_cs1),
+	FUNCTION(blsp_spi3_cs2),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(atest_usb13),
+	FUNCTION(tgu_ch1),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(atest_gpsadc0),
+	FUNCTION(blsp_i2c1),
+	FUNCTION(ddr_bist),
+	FUNCTION(atest_tsens2),
+	FUNCTION(atest_usb1),
+	FUNCTION(blsp_spi2),
+	FUNCTION(blsp_uim2),
+	FUNCTION(phase_flag3),
+	FUNCTION(bimc_dte1),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(atest_usb12),
+	FUNCTION(bimc_dte0),
+	FUNCTION(blsp_i2c3),
+	FUNCTION(wlan2_adc1),
+	FUNCTION(atest_usb11),
+	FUNCTION(dbg_out),
+	FUNCTION(wlan2_adc0),
+	FUNCTION(atest_usb10),
+	FUNCTION(RCM_MARKER),
+	FUNCTION(blsp_spi4),
+	FUNCTION(pri_mi2s),
+	FUNCTION(phase_flag26),
+	FUNCTION(qdss_cti0_a),
+	FUNCTION(qdss_cti0_b),
+	FUNCTION(qdss_cti1_a),
+	FUNCTION(qdss_cti1_b),
+	FUNCTION(DP_HOT),
+	FUNCTION(pri_mi2s_ws),
+	FUNCTION(phase_flag27),
+	FUNCTION(blsp_i2c4),
+	FUNCTION(phase_flag28),
+	FUNCTION(blsp_uart5),
+	FUNCTION(blsp_spi5),
+	FUNCTION(blsp_uim5),
+	FUNCTION(phase_flag5),
+	FUNCTION(blsp_i2c5),
+	FUNCTION(blsp_spi6),
+	FUNCTION(blsp_uart2),
+	FUNCTION(blsp_uim6),
+	FUNCTION(phase_flag11),
+	FUNCTION(vsense_data0),
+	FUNCTION(blsp_i2c6),
+	FUNCTION(phase_flag12),
+	FUNCTION(vsense_data1),
+	FUNCTION(phase_flag13),
+	FUNCTION(vsense_mode),
+	FUNCTION(blsp_spi7),
+	FUNCTION(blsp_uart6_a),
+	FUNCTION(blsp_uart6_b),
+	FUNCTION(sec_mi2s),
+	FUNCTION(sndwire_clk),
+	FUNCTION(phase_flag17),
+	FUNCTION(vsense_clkout),
+	FUNCTION(sndwire_data),
+	FUNCTION(phase_flag18),
+	FUNCTION(WSA_SPKR),
+	FUNCTION(blsp_i2c7),
+	FUNCTION(phase_flag19),
+	FUNCTION(vfr_1),
+	FUNCTION(phase_flag20),
+	FUNCTION(NFC_INT),
+	FUNCTION(blsp_spi8_cs1),
+	FUNCTION(blsp_spi8_cs2),
+	FUNCTION(m_voc),
+	FUNCTION(phase_flag21),
+	FUNCTION(NFC_EN),
+	FUNCTION(phase_flag22),
+	FUNCTION(NFC_DWL),
+	FUNCTION(blsp_i2c8_a),
+	FUNCTION(blsp_i2c8_b),
+	FUNCTION(phase_flag23),
+	FUNCTION(NFC_ESE),
+	FUNCTION(pwr_modem),
+	FUNCTION(phase_flag24),
+	FUNCTION(qdss_gpio),
+	FUNCTION(cam_mclk),
+	FUNCTION(pwr_nav),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(qspi_data0),
+	FUNCTION(pwr_crypto),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(qspi_data1),
+	FUNCTION(agera_pll),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(qspi_data2),
+	FUNCTION(jitter_bist),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(FL_R3LED),
+	FUNCTION(CCI_TIMER0),
+	FUNCTION(FL_STROBE),
+	FUNCTION(CCI_TIMER1),
+	FUNCTION(CAM_LDO1),
+	FUNCTION(mdss_vsync0),
+	FUNCTION(mdss_vsync1),
+	FUNCTION(mdss_vsync2),
+	FUNCTION(mdss_vsync3),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(CAM_IRQ),
+	FUNCTION(atest_usb2),
+	FUNCTION(cci_i2c),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(atest_tsens),
+	FUNCTION(atest_usb21),
+	FUNCTION(pll_reset),
+	FUNCTION(atest_usb23),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(CCI_TIMER3),
+	FUNCTION(CCI_ASYNC),
+	FUNCTION(qspi_cs),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(CAM3_STANDBY),
+	FUNCTION(CCI_TIMER4),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(CAM_LDO2),
+	FUNCTION(cci_async),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(CAM0_RST),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(CAM1_RST),
+	FUNCTION(qspi_clk),
+	FUNCTION(phase_flag30),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(qspi_resetn),
+	FUNCTION(phase_flag1),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(CAM0_STANDBY),
+	FUNCTION(phase_flag2),
+	FUNCTION(CAM1_STANDBY),
+	FUNCTION(phase_flag9),
+	FUNCTION(CAM2_STANDBY),
+	FUNCTION(qspi_data3),
+	FUNCTION(phase_flag15),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(CAM3_RST),
+	FUNCTION(CCI_TIMER2),
+	FUNCTION(phase_flag16),
+	FUNCTION(LCD0_RESET),
+	FUNCTION(phase_flag6),
+	FUNCTION(SD_CARD),
+	FUNCTION(phase_flag29),
+	FUNCTION(DP_EN),
+	FUNCTION(phase_flag25),
+	FUNCTION(USBC_ORIENTATION),
+	FUNCTION(phase_flag10),
+	FUNCTION(atest_usb20),
+	FUNCTION(gcc_gp1),
+	FUNCTION(phase_flag4),
+	FUNCTION(atest_usb22),
+	FUNCTION(USB_PHY),
+	FUNCTION(gcc_gp2),
+	FUNCTION(atest_char),
+	FUNCTION(mdp_vsync),
+	FUNCTION(gcc_gp3),
+	FUNCTION(atest_char3),
+	FUNCTION(FORCE_TOUCH),
+	FUNCTION(cri_trng0),
+	FUNCTION(atest_char2),
+	FUNCTION(cri_trng1),
+	FUNCTION(atest_char1),
+	FUNCTION(AUDIO_USBC),
+	FUNCTION(audio_ref),
+	FUNCTION(MDP_VSYNC),
+	FUNCTION(cri_trng),
+	FUNCTION(atest_char0),
+	FUNCTION(US_EURO),
+	FUNCTION(LCD_BACKLIGHT),
+	FUNCTION(blsp_spi8_a),
+	FUNCTION(blsp_spi8_b),
+	FUNCTION(sp_cmu),
+	FUNCTION(nav_pps_a),
+	FUNCTION(nav_pps_b),
+	FUNCTION(nav_pps_c),
+	FUNCTION(gps_tx_a),
+	FUNCTION(gps_tx_b),
+	FUNCTION(gps_tx_c),
+	FUNCTION(adsp_ext),
+	FUNCTION(TS_RESET),
+	FUNCTION(ssc_irq),
+	FUNCTION(isense_dbg),
+	FUNCTION(phase_flag0),
+	FUNCTION(phase_flag7),
+	FUNCTION(phase_flag8),
+	FUNCTION(tsense_pwm1),
+	FUNCTION(tsense_pwm2),
+	FUNCTION(SENSOR_RST),
+	FUNCTION(WMSS_RESETN),
+	FUNCTION(HAPTICS_PWM),
+	FUNCTION(GPS_eLNA),
+	FUNCTION(mss_lte),
+	FUNCTION(uim2_data),
+	FUNCTION(uim2_clk),
+	FUNCTION(uim2_reset),
+	FUNCTION(uim2_present),
+	FUNCTION(uim1_data),
+	FUNCTION(uim1_clk),
+	FUNCTION(uim1_reset),
+	FUNCTION(uim1_present),
+	FUNCTION(uim_batt),
+	FUNCTION(pa_indicator),
+	FUNCTION(ldo_en),
+	FUNCTION(ldo_update),
+	FUNCTION(qlink_request),
+	FUNCTION(qlink_enable),
+	FUNCTION(prng_rosc),
+	FUNCTION(LCD_PWR),
+};
+
+static const struct msm_pingroup sdm660_groups[] = {
+	PINGROUP(0, SOUTH, blsp_spi1, blsp_uart1, blsp_uim1, tgu_ch0, NA, NA,
+		 qdss_gpio4, atest_gpsadc1, NA),
+	PINGROUP(1, SOUTH, blsp_spi1, blsp_uart1, blsp_uim1, tgu_ch1, NA, NA,
+		 qdss_gpio5, atest_gpsadc0, NA),
+	PINGROUP(2, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(3, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, ddr_bist, NA, NA,
+		 atest_tsens2, atest_usb1, NA),
+	PINGROUP(4, NORTH, blsp_spi2, blsp_uim2, blsp_uart2, phase_flag3, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(5, SOUTH, blsp_spi2, blsp_uim2, blsp_uart2, phase_flag14, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(6, SOUTH, blsp_spi2, blsp_i2c2, blsp_uart2, phase_flag31, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(7, SOUTH, blsp_spi2, blsp_i2c2, blsp_uart2, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(8, NORTH, blsp_spi3, ddr_bist, NA, NA, NA, wlan1_adc1,
+		 atest_usb13, bimc_dte1, NA),
+	PINGROUP(9, NORTH, blsp_spi3, ddr_bist, NA, NA, NA, wlan1_adc0,
+		 atest_usb12, bimc_dte0, NA),
+	PINGROUP(10, NORTH, blsp_spi3, blsp_i2c3, ddr_bist, NA, NA, wlan2_adc1,
+		 atest_usb11, bimc_dte1, NA),
+	PINGROUP(11, NORTH, blsp_spi3, blsp_i2c3, NA, dbg_out, wlan2_adc0,
+		 atest_usb10, bimc_dte0, NA, NA),
+	PINGROUP(12, NORTH, blsp_spi4, pri_mi2s, NA, phase_flag26, qdss_cti1_b,
+		 NA, NA, NA, NA),
+	PINGROUP(13, NORTH, blsp_spi4, DP_HOT, pri_mi2s_ws, NA, NA,
+		 phase_flag27, qdss_cti0_b, NA, NA),
+	PINGROUP(14, NORTH, blsp_spi4, blsp_i2c4, pri_mi2s, NA, phase_flag28,
+		 NA, NA, NA, NA),
+	PINGROUP(15, NORTH, blsp_spi4, blsp_i2c4, pri_mi2s, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(16, CENTER, blsp_uart5, blsp_spi5, blsp_uim5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(17, CENTER, blsp_uart5, blsp_spi5, blsp_uim5, NA, phase_flag5,
+		 NA, NA, NA, NA),
+	PINGROUP(18, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(19, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(20, SOUTH, NA, NA, blsp_uim6, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(21, SOUTH, NA, NA, blsp_uim6, NA, phase_flag11,
+		 qdss_cti0_b, vsense_data0, NA, NA),
+	PINGROUP(22, CENTER, blsp_spi6, NA, blsp_i2c6, NA,
+		 phase_flag12, vsense_data1, NA, NA, NA),
+	PINGROUP(23, CENTER, blsp_spi6, NA, blsp_i2c6, NA,
+		 phase_flag13, vsense_mode, NA, NA, NA),
+	PINGROUP(24, NORTH, blsp_spi7, blsp_uart6_a, sec_mi2s, sndwire_clk, NA,
+		 NA, phase_flag17, vsense_clkout, NA),
+	PINGROUP(25, NORTH, blsp_spi7, blsp_uart6_a, sec_mi2s, sndwire_data, NA,
+		 NA, phase_flag18, NA, NA),
+	PINGROUP(26, NORTH, blsp_spi7, blsp_uart6_a, blsp_i2c7, sec_mi2s, NA,
+		 phase_flag19, NA, NA, NA),
+	PINGROUP(27, NORTH, blsp_spi7, blsp_uart6_a, blsp_i2c7, vfr_1, sec_mi2s,
+		 NA, phase_flag20, NA, NA),
+	PINGROUP(28, CENTER, blsp_spi8_a, blsp_uart6_b, m_voc, NA, phase_flag21,
+		 NA, NA, NA, NA),
+	PINGROUP(29, CENTER, blsp_spi8_a, blsp_uart6_b, NA, NA, phase_flag22,
+		 NA, NA, NA, NA),
+	PINGROUP(30, CENTER, blsp_spi8_a, blsp_uart6_b, blsp_i2c8_a,
+		 blsp_spi3_cs1, NA, phase_flag23, NA, NA, NA),
+	PINGROUP(31, CENTER, blsp_spi8_a, blsp_uart6_b, blsp_i2c8_a, pwr_modem,
+		 NA, phase_flag24, qdss_gpio, NA, NA),
+	PINGROUP(32, SOUTH, cam_mclk, pwr_nav, NA, NA, qdss_gpio0, NA, NA, NA,
+		 NA),
+	PINGROUP(33, SOUTH, cam_mclk, qspi_data0, pwr_crypto, NA, NA,
+		 qdss_gpio1, NA, NA, NA),
+	PINGROUP(34, SOUTH, cam_mclk, qspi_data1, agera_pll, NA, NA,
+		 qdss_gpio2, NA, NA, NA),
+	PINGROUP(35, SOUTH, cam_mclk, qspi_data2, jitter_bist, NA, NA,
+		 qdss_gpio3, NA, atest_usb2, NA),
+	PINGROUP(36, SOUTH, cci_i2c, pll_bypassnl, agera_pll, NA, NA,
+		 qdss_gpio4, atest_tsens, atest_usb21, NA),
+	PINGROUP(37, SOUTH, cci_i2c, pll_reset, NA, NA, qdss_gpio5,
+		 atest_usb23, NA, NA, NA),
+	PINGROUP(38, SOUTH, cci_i2c, NA, NA, qdss_gpio6, NA, NA, NA, NA, NA),
+	PINGROUP(39, SOUTH, cci_i2c, NA, NA, qdss_gpio7, NA, NA, NA, NA, NA),
+	PINGROUP(40, SOUTH, CCI_TIMER0, NA, blsp_spi8_b, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(41, SOUTH, CCI_TIMER1, NA, blsp_spi8_b, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(42, SOUTH, mdss_vsync0, mdss_vsync1, mdss_vsync2, mdss_vsync3,
+		 NA, NA, qdss_gpio9, NA, NA),
+	PINGROUP(43, SOUTH, CCI_TIMER3, CCI_ASYNC, qspi_cs, NA, NA,
+		 qdss_gpio10, NA, NA, NA),
+	PINGROUP(44, SOUTH, CCI_TIMER4, CCI_ASYNC, blsp_spi8_b, blsp_i2c8_b, NA,
+		 NA, qdss_gpio11, NA, NA),
+	PINGROUP(45, SOUTH, cci_async, NA, NA, qdss_gpio12, NA, NA, NA, NA, NA),
+	PINGROUP(46, SOUTH, blsp_spi1, NA, NA, qdss_gpio13, NA, NA, NA, NA, NA),
+	PINGROUP(47, SOUTH, qspi_clk, NA, phase_flag30, qdss_gpio14, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(48, SOUTH, NA, phase_flag1, qdss_gpio15, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(49, SOUTH, blsp_spi6, phase_flag2, qdss_cti0_a, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(50, SOUTH, qspi_cs, NA, phase_flag9, qdss_cti0_a, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(51, SOUTH, qspi_data3, NA, phase_flag15, qdss_gpio8, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(52, SOUTH, CCI_TIMER2, blsp_spi8_b, blsp_i2c8_b, blsp_spi6,
+		 phase_flag16, qdss_gpio, NA, NA, NA),
+	PINGROUP(53, NORTH, NA, phase_flag6, qdss_cti1_a, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(54, NORTH, NA, NA, phase_flag29, NA, NA, NA, NA, NA, NA),
+	PINGROUP(55, SOUTH, NA, phase_flag25, qdss_cti1_a, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(56, SOUTH, NA, phase_flag10, qdss_gpio3, NA, atest_usb20, NA,
+		 NA, NA, NA),
+	PINGROUP(57, SOUTH, gcc_gp1, NA, phase_flag4, atest_usb22, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(58, SOUTH, USB_PHY, gcc_gp2, NA, NA, atest_char, NA, NA, NA,
+		 NA),
+	PINGROUP(59, NORTH, mdp_vsync, gcc_gp3, NA, NA, atest_char3, NA, NA,
+		 NA, NA),
+	PINGROUP(60, NORTH, cri_trng0, NA, NA, atest_char2, NA, NA, NA, NA, NA),
+	PINGROUP(61, NORTH, pri_mi2s, cri_trng1, NA, NA, atest_char1, NA, NA,
+		 NA, NA),
+	PINGROUP(62, NORTH, sec_mi2s, audio_ref, MDP_VSYNC, cri_trng, NA, NA,
+		 atest_char0, NA, NA),
+	PINGROUP(63, NORTH, NA, NA, NA, qdss_gpio1, NA, NA, NA, NA, NA),
+	PINGROUP(64, SOUTH, blsp_spi8_cs1, sp_cmu, NA, NA, qdss_gpio2, NA, NA,
+		 NA, NA),
+	PINGROUP(65, SOUTH, NA, nav_pps_a, nav_pps_a, gps_tx_a, blsp_spi3_cs2,
+		 adsp_ext, NA, NA, NA),
+	PINGROUP(66, NORTH, NA, NA, qdss_cti1_b, NA, NA, NA, NA, NA, NA),
+	PINGROUP(67, NORTH, NA, NA, qdss_gpio0, NA, NA, NA, NA, NA, NA),
+	PINGROUP(68, NORTH, isense_dbg, NA, phase_flag0, qdss_gpio, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(69, NORTH, NA, phase_flag7, qdss_gpio, NA, NA, NA, NA, NA, NA),
+	PINGROUP(70, NORTH, NA, phase_flag8, qdss_gpio6, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(71, NORTH, NA, NA, qdss_gpio7, tsense_pwm1, tsense_pwm2, NA,
+		 NA, NA, NA),
+	PINGROUP(72, NORTH, NA, qdss_gpio14, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(73, NORTH, NA, NA, qdss_gpio15, NA, NA, NA, NA, NA, NA),
+	PINGROUP(74, NORTH, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(75, NORTH, NA, NA, qdss_gpio8, NA, NA, NA, NA, NA, NA),
+	PINGROUP(76, NORTH, blsp_spi8_cs2, NA, NA, NA, qdss_gpio9, NA, NA, NA,
+		 NA),
+	PINGROUP(77, NORTH, NA, NA, qdss_gpio10, NA, NA, NA, NA, NA, NA),
+	PINGROUP(78, NORTH, gcc_gp1, NA, qdss_gpio13, NA, NA, NA, NA, NA, NA),
+	PINGROUP(79, SOUTH, NA, NA, qdss_gpio11, NA, NA, NA, NA, NA, NA),
+	PINGROUP(80, SOUTH, nav_pps_c, nav_pps_c, gps_tx_c, NA, NA, qdss_gpio12,
+		 NA, NA, NA),
+	PINGROUP(81, CENTER, mss_lte, gcc_gp2, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(82, CENTER, mss_lte, gcc_gp3, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(83, SOUTH, uim2_data, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(84, SOUTH, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(85, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(86, SOUTH, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(87, SOUTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(88, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(89, SOUTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(90, SOUTH, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(91, SOUTH, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(92, SOUTH, NA, NA, pa_indicator, NA, NA, NA, NA, NA, NA),
+	PINGROUP(93, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(94, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(95, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(96, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(97, SOUTH, NA, ldo_en, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(98, SOUTH, NA, nav_pps_b, nav_pps_b, gps_tx_b, ldo_update, NA,
+		 NA, NA, NA),
+	PINGROUP(99, SOUTH, qlink_request, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(100, SOUTH, qlink_enable, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(101, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(102, SOUTH, NA, prng_rosc, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(103, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(104, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(105, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(106, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(107, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(108, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(109, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(110, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(113, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
+	SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
+	UFS_RESET(ufs_reset, 0x9a3000),
+};
+
+static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
+	.pins = sdm660_pins,
+	.npins = ARRAY_SIZE(sdm660_pins),
+	.functions = sdm660_functions,
+	.nfunctions = ARRAY_SIZE(sdm660_functions),
+	.groups = sdm660_groups,
+	.ngroups = ARRAY_SIZE(sdm660_groups),
+	.ngpios = 114,
+};
+
+static int sdm660_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &sdm660_pinctrl);
+}
+
+static const struct of_device_id sdm660_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm660-pinctrl", },
+	{ },
+};
+
+static struct platform_driver sdm660_pinctrl_driver = {
+	.driver = {
+		.name = "sdm660-pinctrl",
+		.of_match_table = sdm660_pinctrl_of_match,
+	},
+	.probe = sdm660_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init sdm660_pinctrl_init(void)
+{
+	return platform_driver_register(&sdm660_pinctrl_driver);
+}
+arch_initcall(sdm660_pinctrl_init);
+
+static void __exit sdm660_pinctrl_exit(void)
+{
+	platform_driver_unregister(&sdm660_pinctrl_driver);
+}
+module_exit(sdm660_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdm660 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdm660_pinctrl_of_match);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 4fe58bd..25cf988 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -87,7 +87,7 @@
 
 #define IPA_QMAP_ID_BYTE 0
 
-#define IPA_TX_MAX_DESC (20)
+#define IPA_TX_MAX_DESC (50)
 
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
@@ -197,6 +197,15 @@ static void ipa3_wq_write_done_status(int src_pipe,
 	ipa3_wq_write_done_common(sys, tx_pkt);
 }
 
+static void ipa3_tasklet_schd_work(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+
+	sys = container_of(work, struct ipa3_sys_context, tasklet_work);
+	if (atomic_read(&sys->xmit_eot_cnt))
+		tasklet_schedule(&sys->tasklet);
+}
+
 /**
  * ipa_write_done() - this function will be (eventually) called when a Tx
  * operation is complete
@@ -235,10 +244,13 @@ static void ipa3_tasklet_write_done(unsigned long data)
 		 * to watchdog bark. For avoiding these scenarios exit from
 		 * tasklet after reaching max limit.
 		 */
-		if (max_tx_pkt == IPA_TX_MAX_DESC)
+		if (max_tx_pkt >= IPA_TX_MAX_DESC)
 			break;
 	}
 	spin_unlock_bh(&sys->spinlock);
+
+	if (max_tx_pkt >= IPA_TX_MAX_DESC)
+		queue_work(sys->tasklet_wq, &sys->tasklet_work);
 }
 
 
@@ -1040,6 +1052,16 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 			goto fail_wq2;
 		}
 
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipataskletwq%d",
+				sys_in->client);
+		ep->sys->tasklet_wq = alloc_workqueue(buff,
+				WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
+		if (!ep->sys->tasklet_wq) {
+			IPAERR("failed to create tasklet wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq3;
+		}
 		INIT_LIST_HEAD(&ep->sys->head_desc_list);
 		INIT_LIST_HEAD(&ep->sys->rcycl_list);
 		spin_lock_init(&ep->sys->spinlock);
@@ -1088,6 +1110,8 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 	atomic_set(&ep->sys->xmit_eot_cnt, 0);
 	tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
 			(unsigned long) ep->sys);
+	INIT_WORK(&ep->sys->tasklet_work,
+		ipa3_tasklet_schd_work);
 	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
 	if (ipa3_assign_policy(sys_in, ep->sys)) {
 		IPAERR("failed to sys ctx for client %d\n", sys_in->client);
@@ -1273,6 +1297,8 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 fail_gen2:
 	ipa_pm_deregister(ep->sys->pm_hdl);
 fail_pm:
+	destroy_workqueue(ep->sys->tasklet_wq);
+fail_wq3:
 	destroy_workqueue(ep->sys->repl_wq);
 fail_wq2:
 	destroy_workqueue(ep->sys->wq);
@@ -1402,6 +1428,8 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
 	}
 	if (ep->sys->repl_wq)
 		flush_workqueue(ep->sys->repl_wq);
+	if (ep->sys->tasklet_wq)
+		flush_workqueue(ep->sys->tasklet_wq);
 	if (IPA_CLIENT_IS_CONS(ep->client))
 		ipa3_cleanup_rx(ep->sys);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index bb6f7a0..a270f44b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1031,6 +1031,7 @@ struct ipa3_sys_context {
 	struct list_head pending_pkts[GSI_VEID_MAX];
 	atomic_t xmit_eot_cnt;
 	struct tasklet_struct tasklet;
+	struct work_struct tasklet_work;
 
 	/* ordering is important - mutable fields go above */
 	struct ipa3_ep_context *ep;
@@ -1044,6 +1045,7 @@ struct ipa3_sys_context {
 	u32 pm_hdl;
 	unsigned int napi_sch_cnt;
 	unsigned int napi_comp_cnt;
+	struct workqueue_struct *tasklet_wq;
 	/* ordering is important - other immutable fields go below */
 };
 
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index ead4839..c17c61e 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"ALG: %s: " fmt, __func__
@@ -714,7 +714,7 @@ void cap_learning_abort(struct cap_learning *cl)
 	pr_debug("Aborting cap_learning\n");
 	cl->active = false;
 	cl->init_cap_uah = 0;
-	mutex_lock(&cl->lock);
+	mutex_unlock(&cl->lock);
 }
 
 /**
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 98c3c24..0d74602e 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -274,6 +274,22 @@
 	  Say M here if you want to include support for the Qualcomm RPM as a
 	  module. This will build a module called "qcom-smd-rpm".
 
+config MSM_SPM
+	bool "Driver support for SPM and AVS wrapper hardware"
+	help
+	  Enables the support for SPM and AVS wrapper hardware on MSMs. SPM
+	  hardware is used to manage the processor power during sleep. The
+	  driver allows configuring SPM to allow different low power modes for
+	  both core and L2.
+
+config MSM_L2_SPM
+	bool "SPM support for L2 cache"
+	help
+	  Enable SPM driver support for L2 cache. Some MSM chipsets allow
+	  control of L2 cache low power mode with a Subsystem Power manager.
+	  Enabling this driver allows configuring L2 SPM for low power modes
+	  on supported chipsets.
+
 config QCOM_SCM
 	bool "Secure Channel Manager (SCM) support"
 	default n
@@ -873,6 +889,15 @@
 	  An offline CPU is considered as a reserved CPU since this OS can't use
 	  it.
 
+config QTI_HW_KEY_MANAGER
+	tristate "Enable QTI Hardware Key Manager for storage encryption"
+	default n
+	help
+	 Say 'Y' to enable the hardware key manager driver used to operate
+	 and access key manager hardware block. This is used to interface with
+	 HWKM hardware to perform key operations from the kernel which will
+	 be used for storage encryption.
+
 source "drivers/soc/qcom/icnss2/Kconfig"
 
 config ICNSS
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 4856a43..f5b2b90 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -16,6 +16,7 @@
 qcom_rpmh-y			+= rpmh.o
 obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
 obj-$(CONFIG_QCOM_SMEM) +=	smem.o
+obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o
 obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
 obj-$(CONFIG_QCOM_SMP2P)	+= smp2p.o
 obj-$(CONFIG_QCOM_SMSM)	+= smsm.o
@@ -94,11 +95,14 @@
 obj-$(CONFIG_MSM_RPM_SMD)   +=  rpm-smd-debug.o
 endif
 obj-$(CONFIG_QCOM_CDSP_RM) += cdsprm.o
-obj-$(CONFIG_ICNSS) += icnss.o
-obj-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
+obj-$(CONFIG_ICNSS) += msm_icnss.o
+msm_icnss-y := icnss.o
+msm_icnss-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
 obj-$(CONFIG_RMNET_CTL) += rmnet_ctl/
 obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
 obj-$(CONFIG_QTI_L2_REUSE) += l2_reuse.o
 obj-$(CONFIG_ICNSS2) += icnss2/
 obj-$(CONFIG_QTI_CRYPTO_COMMON) += crypto-qti-common.o
 obj-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o
+obj-$(CONFIG_QTI_HW_KEY_MANAGER) += hwkm_qti.o
+hwkm_qti-y += hwkm.o
diff --git a/drivers/soc/qcom/hwkm.c b/drivers/soc/qcom/hwkm.c
new file mode 100644
index 0000000..af19d18
--- /dev/null
+++ b/drivers/soc/qcom/hwkm.c
@@ -0,0 +1,1214 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI hardware key manager driver.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/bitops.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <linux/iommu.h>
+
+#include <linux/hwkm.h>
+#include "hwkmregs.h"
+#include "hwkm_serialize.h"
+
+#define BYTES_TO_WORDS(bytes) (((bytes) + 3) / 4)
+
+#define WRITE_TO_KDF_PACKET(cmd_ptr, src, len)	\
+	do {					\
+		memcpy(cmd_ptr, src, len);	\
+		cmd_ptr += len;			\
+	} while (0)
+
+#define ASYNC_CMD_HANDLING false
+
+// Maximum number of times to poll
+#define MAX_RETRIES 20000
+
+static int retries;
+#define WAIT_UNTIL(cond)			\
+for (retries = 0; !(cond) && (retries < MAX_RETRIES); retries++)
+
+#define ICEMEM_SLAVE_TPKEY_VAL	0x192
+#define ICEMEM_SLAVE_TPKEY_SLOT	0x92
+#define KM_MASTER_TPKEY_SLOT	10
+
+struct hwkm_clk_info {
+	struct list_head list;
+	struct clk *clk;
+	const char *name;
+	u32 max_freq;
+	u32 min_freq;
+	u32 curr_freq;
+	bool enabled;
+};
+
+struct hwkm_device {
+	struct device *dev;
+	void __iomem *km_base;
+	void __iomem *ice_base;
+	struct resource *km_res;
+	struct resource *ice_res;
+	struct list_head clk_list_head;
+	bool is_hwkm_clk_available;
+	bool is_hwkm_enabled;
+};
+
+static struct hwkm_device *km_device;
+
+#define qti_hwkm_readl(hwkm, reg, dest)				\
+	(((dest) == KM_MASTER) ?				\
+	(readl_relaxed((hwkm)->km_base + (reg))) :		\
+	(readl_relaxed((hwkm)->ice_base + (reg))))
+#define qti_hwkm_writel(hwkm, val, reg, dest)			\
+	(((dest) == KM_MASTER) ?				\
+	(writel_relaxed((val), (hwkm)->km_base + (reg))) :	\
+	(writel_relaxed((val), (hwkm)->ice_base + (reg))))
+#define qti_hwkm_setb(hwkm, reg, nr, dest) {			\
+	u32 val = qti_hwkm_readl(hwkm, reg, dest);		\
+	val |= (0x1 << nr);					\
+	qti_hwkm_writel(hwkm, val, reg, dest);			\
+}
+#define qti_hwkm_clearb(hwkm, reg, nr, dest) {			\
+	u32 val = qti_hwkm_readl(hwkm, reg, dest);		\
+	val &= ~(0x1 << nr);					\
+	qti_hwkm_writel(hwkm, val, reg, dest);			\
+}
+
+static inline bool qti_hwkm_testb(struct hwkm_device *hwkm, u32 reg, u8 nr,
+				  enum hwkm_destination dest)
+{
+	u32 val = qti_hwkm_readl(hwkm, reg, dest);
+
+	val = (val >> nr) & 0x1;
+	if (val == 0)
+		return false;
+	return true;
+}
+
+static inline unsigned int qti_hwkm_get_reg_data(struct hwkm_device *dev,
+						 u32 reg, u32 offset, u32 mask,
+						 enum hwkm_destination dest)
+{
+	u32 val = 0;
+
+	val = qti_hwkm_readl(dev, reg, dest);
+	return ((val & mask) >> offset);
+}
+
+/**
+ * @brief Send a command packet to the HWKM Master instance as described
+ *        in section 3.2.5.1 of Key Manager HPG
+ *        - Clear CMD FIFO
+ *        - Clear Error Status Register
+ *        - Write CMD_ENABLE = 1
+ *        - for word in cmd_packet:
+ *          - poll until CMD_FIFO_AVAILABLE_SPACE > 0.
+ *            Timeout error after MAX_RETRIES (20,000) retries.
+ *          - write word to CMD register
+ *        - for word in rsp_packet:
+ *          - poll until RSP_FIFO_AVAILABLE_DATA > 0.
+ *            Timeout error after MAX_RETRIES (20,000) retries.
+ *          - read word from RSP register
+ *        - Verify CMD_DONE == 1
+ *        - Clear CMD_DONE
+ *
+ * @return HWKM_SUCCESS if successful. HWKM Error Code otherwise.
+ */
+
+static int qti_hwkm_master_transaction(struct hwkm_device *dev,
+				       const uint32_t *cmd_packet,
+				       size_t cmd_words,
+				       uint32_t *rsp_packet,
+				       size_t rsp_words)
+{
+	int i = 0;
+	int err = 0;
+
+	// Clear CMD FIFO
+	qti_hwkm_setb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+	qti_hwkm_clearb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+
+	// Clear previous CMD errors
+	qti_hwkm_writel(dev, 0x0, QTI_HWKM_MASTER_RG_BANK2_BANKN_ESR,
+			KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+
+	// Enable command
+	qti_hwkm_setb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL, CMD_ENABLE_BIT,
+			KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+
+	if (qti_hwkm_testb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, KM_MASTER)) {
+
+		pr_err("%s: CMD_FIFO_CLEAR_BIT not set\n", __func__);
+		err = -1;
+		return err;
+	}
+
+	for (i = 0; i < cmd_words; i++) {
+		WAIT_UNTIL(qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
+			CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
+			KM_MASTER) > 0);
+		if (qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
+			CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
+			KM_MASTER) == 0) {
+			pr_err("%s: cmd fifo space not available\n", __func__);
+			err = -1;
+			return err;
+		}
+		qti_hwkm_writel(dev, cmd_packet[i],
+				QTI_HWKM_MASTER_RG_BANK2_CMD_0, KM_MASTER);
+		/* Write memory barrier */
+		wmb();
+	}
+
+	for (i = 0; i < rsp_words; i++) {
+		WAIT_UNTIL(qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
+			RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
+			KM_MASTER) > 0);
+		if (qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
+			RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
+			KM_MASTER) == 0) {
+			pr_err("%s: rsp fifo data not available\n", __func__);
+			err = -1;
+			return err;
+		}
+		rsp_packet[i] = qti_hwkm_readl(dev,
+				QTI_HWKM_MASTER_RG_BANK2_RSP_0, KM_MASTER);
+	}
+
+	if (!qti_hwkm_testb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS,
+			CMD_DONE_BIT, KM_MASTER)) {
+		pr_err("%s: CMD_DONE_BIT not set\n", __func__);
+		err = -1;
+		return err;
+	}
+
+	// Clear CMD_DONE status bit
+	qti_hwkm_setb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS,
+			CMD_DONE_BIT, KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+
+	return err;
+}
+
+/**
+ * @brief Send a command packet to the HWKM ICE slave instance as described in
+ *        section 3.2.5.1 of Key Manager HPG
+ *        - Clear CMD FIFO
+ *        - Clear Error Status Register
+ *        - Write CMD_ENABLE = 1
+ *        - for word in cmd_packet:
+ *          - poll until CMD_FIFO_AVAILABLE_SPACE > 0.
+ *            Timeout error after MAX_RETRIES (20,000) retries.
+ *          - write word to CMD register
+ *        - for word in rsp_packet:
+ *          - poll until RSP_FIFO_AVAILABLE_DATA > 0.
+ *            Timeout error after MAX_RETRIES (20,000) retries.
+ *          - read word from RSP register
+ *        - Verify CMD_DONE == 1
+ *        - Clear CMD_DONE
+ *
+ * @return HWKM_SUCCESS if successful. HWKM Error Code otherwise.
+ */
+
+/*
+ * qti_hwkm_ice_transaction() - run one command/response exchange with the
+ * HWKM ICE slave bank (HPG section 3.2.5.1 flow).
+ *
+ * Clears the CMD FIFO and error status, enables command handling, streams
+ * cmd_words into the CMD register (polling for FIFO space), reads
+ * rsp_words back from the RSP register (polling for data), then verifies
+ * and clears CMD_DONE.
+ *
+ * Return: 0 on success, -1 on any FIFO/handshake failure.
+ */
+static int qti_hwkm_ice_transaction(struct hwkm_device *dev,
+				    const uint32_t *cmd_packet,
+				    size_t cmd_words,
+				    uint32_t *rsp_packet,
+				    size_t rsp_words)
+{
+	int i = 0;
+	int err = 0;
+
+	// Clear CMD FIFO by pulsing the clear bit: set, then deassert
+	qti_hwkm_setb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+	qti_hwkm_clearb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	// Clear previous CMD errors
+	qti_hwkm_writel(dev, 0x0, QTI_HWKM_ICE_RG_BANK0_BANKN_ESR,
+			ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	// Enable command
+	qti_hwkm_setb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL, CMD_ENABLE_BIT,
+			ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	// The FIFO clear pulse must have deasserted before commands are sent.
+	// (The old message read "not set", but the failure here is the bit
+	// remaining set.)
+	if (qti_hwkm_testb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE)) {
+		pr_err("%s: CMD_FIFO_CLEAR_BIT still set\n", __func__);
+		err = -1;
+		return err;
+	}
+
+	for (i = 0; i < cmd_words; i++) {
+		// Poll (bounded by WAIT_UNTIL) for space, then re-check once
+		WAIT_UNTIL(qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
+			CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
+			ICEMEM_SLAVE) > 0);
+		if (qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
+			CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
+			ICEMEM_SLAVE) == 0) {
+			pr_err("%s: cmd fifo space not available\n", __func__);
+			err = -1;
+			return err;
+		}
+		qti_hwkm_writel(dev, cmd_packet[i],
+				QTI_HWKM_ICE_RG_BANK0_CMD_0, ICEMEM_SLAVE);
+		/* Write memory barrier */
+		wmb();
+	}
+
+	for (i = 0; i < rsp_words; i++) {
+		// Same bounded-poll pattern for response data
+		WAIT_UNTIL(qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
+			RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
+			ICEMEM_SLAVE) > 0);
+		if (qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
+			RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
+			ICEMEM_SLAVE) == 0) {
+			pr_err("%s: rsp fifo data not available\n", __func__);
+			err = -1;
+			return err;
+		}
+		rsp_packet[i] = qti_hwkm_readl(dev,
+				QTI_HWKM_ICE_RG_BANK0_RSP_0, ICEMEM_SLAVE);
+	}
+
+	if (!qti_hwkm_testb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
+			CMD_DONE_BIT, ICEMEM_SLAVE)) {
+		pr_err("%s: CMD_DONE_BIT not set\n", __func__);
+		err = -1;
+		return err;
+	}
+
+	// Clear CMD_DONE status bit (write-1-to-clear via setb)
+	qti_hwkm_setb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
+			CMD_DONE_BIT, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	return err;
+}
+
+/*
+ * @brief Send a command packet to the selected KM instance and read
+ *        the response
+ *
+ * @param dest            [in]  Destination KM instance
+ * @param cmd_packet      [in]  pointer to start of command packet
+ * @param cmd_words       [in]  words in the command packet
+ * @param rsp_packet      [out] pointer to start of response packet
+ * @param rsp_words       [in]  words in the response buffer
+ *
+ * @return HWKM_SUCCESS if successful. HWKW Error Code otherwise.
+ */
+
+static int qti_hwkm_run_transaction(enum hwkm_destination dest,
+				    const uint32_t *cmd_packet,
+				    size_t cmd_words,
+				    uint32_t *rsp_packet,
+				    size_t rsp_words)
+{
+	// Reject NULL packet buffers up front
+	if (!cmd_packet || !rsp_packet)
+		return -1;
+
+	// Route the packet to the destination-specific transaction handler
+	switch (dest) {
+	case KM_MASTER:
+		return qti_hwkm_master_transaction(km_device, cmd_packet,
+				cmd_words, rsp_packet, rsp_words);
+	case ICEMEM_SLAVE:
+		return qti_hwkm_ice_transaction(km_device, cmd_packet,
+				cmd_words, rsp_packet, rsp_words);
+	default:
+		return -2;
+	}
+}
+
+// Pack the logical key policy into its on-the-wire bitfield layout.
+// The compound literal zero-fills every field not explicitly listed,
+// matching the old memset()+assignments behavior.
+static void serialize_policy(struct hwkm_serialized_policy *out,
+			     const struct hwkm_key_policy *policy)
+{
+	*out = (struct hwkm_serialized_policy) {
+		.wrap_with_tpkey = policy->wrap_with_tpk_allowed,
+		.hw_destination = policy->hw_destination,
+		.security_level = policy->security_lvl,
+		.swap_export_allowed = policy->swap_export_allowed,
+		.wrap_export_allowed = policy->wrap_export_allowed,
+		.key_type = policy->key_type,
+		.kdf_depth = policy->kdf_depth,
+		.encrypt_allowed = policy->enc_allowed,
+		.decrypt_allowed = policy->dec_allowed,
+		.alg_allowed = policy->alg_allowed,
+		.key_management_by_tz_secure_allowed = policy->km_by_tz_allowed,
+		.key_management_by_nonsecure_allowed =
+						policy->km_by_nsec_allowed,
+		.key_management_by_modem_allowed = policy->km_by_modem_allowed,
+		.key_management_by_spu_allowed = policy->km_by_spu_allowed,
+	};
+}
+
+// Pack the BSVE and MKS into the KDF command's bitfield layout; unlisted
+// fields are zero-initialized by the compound literal.
+static void serialize_kdf_bsve(struct hwkm_kdf_bsve *out,
+			       const struct hwkm_bsve *bsve, u8 mks)
+{
+	*out = (struct hwkm_kdf_bsve) {
+		.mks = mks,
+		.key_policy_version_en = bsve->km_key_policy_ver_en,
+		.apps_secure_en = bsve->km_apps_secure_en,
+		.msa_secure_en = bsve->km_msa_secure_en,
+		.lcm_fuse_row_en = bsve->km_lcm_fuse_en,
+		.boot_stage_otp_en = bsve->km_boot_stage_otp_en,
+		.swc_en = bsve->km_swc_en,
+		.fuse_region_sha_digest_en = bsve->km_fuse_region_sha_digest_en,
+		.child_key_policy_en = bsve->km_child_key_policy_en,
+		.mks_en = bsve->km_mks_en,
+	};
+}
+
+// Unpack an on-the-wire policy bitfield into the logical key policy;
+// the inverse of serialize_policy(). Unlisted fields become zero.
+static void deserialize_policy(struct hwkm_key_policy *out,
+			       const struct hwkm_serialized_policy *policy)
+{
+	*out = (struct hwkm_key_policy) {
+		.wrap_with_tpk_allowed = policy->wrap_with_tpkey,
+		.hw_destination = policy->hw_destination,
+		.security_lvl = policy->security_level,
+		.swap_export_allowed = policy->swap_export_allowed,
+		.wrap_export_allowed = policy->wrap_export_allowed,
+		.key_type = policy->key_type,
+		.kdf_depth = policy->kdf_depth,
+		.enc_allowed = policy->encrypt_allowed,
+		.dec_allowed = policy->decrypt_allowed,
+		.alg_allowed = policy->alg_allowed,
+		.km_by_tz_allowed = policy->key_management_by_tz_secure_allowed,
+		.km_by_nsec_allowed =
+				policy->key_management_by_nonsecure_allowed,
+		.km_by_modem_allowed = policy->key_management_by_modem_allowed,
+		.km_by_spu_allowed = policy->key_management_by_spu_allowed,
+	};
+}
+
+/*
+ * reverse_key() - reverse a key buffer in place (HW expects keys in
+ * reverse byte order, see the note above qti_handle_keyslot_rdwr()).
+ *
+ * Guards keylen < 2: the old "keylen - 1" underflowed to SIZE_MAX for
+ * keylen == 0, turning the loop into an out-of-bounds walk. A plain
+ * temp swap replaces the XOR-swap trick for clarity.
+ */
+static void reverse_key(u8 *key, size_t keylen)
+{
+	size_t left, right;
+
+	if (keylen < 2)
+		return;
+
+	for (left = 0, right = keylen - 1; left < right; left++, right--) {
+		u8 tmp = key[left];
+
+		key[left] = key[right];
+		key[right] = tmp;
+	}
+}
+
+/*
+ * Command packet format (word indices):
+ * CMD[0]    = Operation info (OP, IRQ_EN, DKS, LEN)
+ * CMD[1:17] = Wrapped Key Blob
+ * CMD[18]   = CRC (disabled)
+ *
+ * Response packet format (word indices):
+ * RSP[0]    = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1]    = Error status
+ */
+
+/*
+ * qti_handle_key_unwrap_import() - unwrap a wrapped key blob into a key
+ * slot on the ICE slave (opcode KEY_UNWRAP_IMPORT).
+ *
+ * Return: 0 on success; negative errno / transaction error; the HW
+ * error status word if the command itself failed.
+ */
+static int qti_handle_key_unwrap_import(const struct hwkm_cmd *cmd_in,
+					struct hwkm_rsp *rsp_in)
+{
+	int status = 0;
+	u32 cmd[UNWRAP_IMPORT_CMD_WORDS] = {0};
+	u32 rsp[UNWRAP_IMPORT_RSP_WORDS] = {0};
+	struct hwkm_operation_info operation = {
+		.op = KEY_UNWRAP_IMPORT,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->unwrap.dks,
+		.slot2_desc = cmd_in->unwrap.kwk,
+		.len = UNWRAP_IMPORT_CMD_WORDS
+	};
+
+	pr_debug("%s: KEY_UNWRAP_IMPORT start\n", __func__);
+
+	// Bound the caller-supplied blob so the copy below cannot overrun
+	// the fixed-size command buffer (blob occupies CMD[1:17])
+	if (cmd_in->unwrap.sz > KEY_BLOB_LENGTH) {
+		pr_err("%s: wrapped key blob too large (%zu)\n", __func__,
+				(size_t)cmd_in->unwrap.sz);
+		return -EINVAL;
+	}
+
+	memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
+	memcpy(cmd + COMMAND_WRAPPED_KEY_IDX, cmd_in->unwrap.wkb,
+			cmd_in->unwrap.sz);
+
+	status = qti_hwkm_run_transaction(ICEMEM_SLAVE, cmd,
+			UNWRAP_IMPORT_CMD_WORDS, rsp, UNWRAP_IMPORT_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status) {
+		pr_err("%s: KEY_UNWRAP_IMPORT error status 0x%x\n", __func__,
+								rsp_in->status);
+		return rsp_in->status;
+	}
+
+	return status;
+}
+
+/*
+ * Command packet format (word indices):
+ * CMD[0] = Operation info (OP, IRQ_EN, DKS, DK, LEN)
+ * CMD[1] = CRC (disabled)
+ *
+ * Response packet format (word indices):
+ * RSP[0] = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1] = Error status
+ */
+
+// Clear a key slot (pair, when is_double_key) on the ICE slave
+// (opcode KEY_SLOT_CLEAR). Returns 0 on success, the transaction error,
+// or the HW error status word.
+static int qti_handle_keyslot_clear(const struct hwkm_cmd *cmd_in,
+				    struct hwkm_rsp *rsp_in)
+{
+	u32 cmd[KEYSLOT_CLEAR_CMD_WORDS] = {0};
+	u32 rsp[KEYSLOT_CLEAR_RSP_WORDS] = {0};
+	int status;
+	struct hwkm_operation_info operation = {
+		.op = KEY_SLOT_CLEAR,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->clear.dks,
+		.op_flag = cmd_in->clear.is_double_key,
+		.len = KEYSLOT_CLEAR_CMD_WORDS
+	};
+
+	pr_debug("%s: KEY_SLOT_CLEAR start\n", __func__);
+
+	// Only the operation-info word is meaningful; CRC is disabled
+	memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
+
+	status = qti_hwkm_run_transaction(ICEMEM_SLAVE, cmd,
+				KEYSLOT_CLEAR_CMD_WORDS, rsp,
+				KEYSLOT_CLEAR_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status)
+		pr_err("%s: KEYSLOT_CLEAR error status 0x%x\n",
+				__func__, rsp_in->status);
+
+	// Zero when the HW reported success, the raw status otherwise
+	return rsp_in->status;
+}
+
+/*
+ * NOTE: The command packet can vary in length. If BE = 0, the last 2 indices
+ * for the BSVE are skipped. Similarly, if Software Context Length (SCL) < 16,
+ * only SCL words are written to the packet. The CRC word is after the last
+ * word of the SWC. The LEN field of this command does not include the SCL
+ * (unlike other commands where the LEN field is the length of the entire
+ * packet). The HW will expect SCL + LEN words to be sent.
+ *
+ * Command packet format (word indices):
+ * CMD[0]    = Operation info (OP, IRQ_EN, DKS, KDK, BE, SCL, LEN)
+ * CMD[1:2]  = Policy
+ * CMD[3]    = BSVE[0] if BE = 1, 0 if BE = 0
+ * CMD[4:5]  = BSVE[1:2] if BE = 1, skipped if BE = 0
+ * CMD[6:21] = Software Context, only writing the number of words in SCL
+ * CMD[22]   = CRC
+ *
+ * Response packet format (word indices):
+ * RSP[0]    = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1]    = Error status
+ */
+
+/*
+ * qti_handle_system_kdf() - derive a key into a slot via the HW KDF
+ * (opcode SYSTEM_KDF). Packet layout is described in the comment above.
+ *
+ * Return: 0 on success; -EINVAL for an oversized software context;
+ * the transaction error or HW error status word otherwise.
+ */
+static int qti_handle_system_kdf(const struct hwkm_cmd *cmd_in,
+				 struct hwkm_rsp *rsp_in)
+{
+	int status = 0;
+	u32 cmd[SYSTEM_KDF_CMD_MAX_WORDS] = {0};
+	u32 rsp[SYSTEM_KDF_RSP_WORDS] = {0};
+	u8 *cmd_ptr = (u8 *) cmd;
+	struct hwkm_serialized_policy policy;
+	struct hwkm_operation_info operation = {
+		.op = SYSTEM_KDF,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->kdf.dks,
+		.slot2_desc = cmd_in->kdf.kdk,
+		.op_flag = cmd_in->kdf.bsve.enabled,
+		.context_len = BYTES_TO_WORDS(cmd_in->kdf.sz),
+		// LEN excludes the SWC words for this opcode (see note above)
+		.len = SYSTEM_KDF_CMD_MIN_WORDS +
+			(cmd_in->kdf.bsve.enabled ? BSVE_WORDS : 1)
+	};
+
+	pr_debug("%s: SYSTEM_KDF start\n", __func__);
+
+	// Bound the caller-supplied software context so the packet
+	// assembly below cannot overrun the fixed-size cmd buffer
+	if (cmd_in->kdf.sz > MAX_SWC_LENGTH) {
+		pr_err("%s: SWC too large (%zu)\n", __func__,
+				(size_t)cmd_in->kdf.sz);
+		return -EINVAL;
+	}
+
+	serialize_policy(&policy, &cmd_in->kdf.policy);
+
+	WRITE_TO_KDF_PACKET(cmd_ptr, &operation, OPERATION_INFO_LENGTH);
+	WRITE_TO_KDF_PACKET(cmd_ptr, &policy, KEY_POLICY_LENGTH);
+
+	if (cmd_in->kdf.bsve.enabled) {
+		struct hwkm_kdf_bsve bsve;
+
+		serialize_kdf_bsve(&bsve, &cmd_in->kdf.bsve, cmd_in->kdf.mks);
+		WRITE_TO_KDF_PACKET(cmd_ptr, &bsve, MAX_BSVE_LENGTH);
+	} else {
+		// Skip the remaining 3 bytes of the current word
+		cmd_ptr += 3 * (sizeof(u8));
+	}
+
+	WRITE_TO_KDF_PACKET(cmd_ptr, cmd_in->kdf.ctx, cmd_in->kdf.sz);
+
+	status = qti_hwkm_run_transaction(ICEMEM_SLAVE, cmd,
+				operation.len + operation.context_len,
+				rsp, SYSTEM_KDF_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status) {
+		pr_err("%s: SYSTEM_KDF error status 0x%x\n", __func__,
+					rsp_in->status);
+		return rsp_in->status;
+	}
+
+	return status;
+}
+
+/*
+ * Command packet format (word indices):
+ * CMD[0] = Operation info (OP, IRQ_EN, SKS, LEN)
+ * CMD[1] = CRC (disabled)
+ *
+ * Response packet format (word indices):
+ * RSP[0] = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1] = Error status
+ */
+
+// Instruct the KM master to broadcast the transport key from slot sks
+// (opcode SET_TPKEY). Returns 0 on success, the transaction error, or
+// the HW error status word.
+static int qti_handle_set_tpkey(const struct hwkm_cmd *cmd_in,
+				struct hwkm_rsp *rsp_in)
+{
+	u32 cmd[SET_TPKEY_CMD_WORDS] = {0};
+	u32 rsp[SET_TPKEY_RSP_WORDS] = {0};
+	int status;
+	struct hwkm_operation_info operation = {
+		.op = SET_TPKEY,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->set_tpkey.sks,
+		.len = SET_TPKEY_CMD_WORDS
+	};
+
+	pr_debug("%s: SET_TPKEY start\n", __func__);
+
+	memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
+
+	// Unlike the other handlers, this command targets the KM master bank
+	status = qti_hwkm_run_transaction(KM_MASTER, cmd,
+			SET_TPKEY_CMD_WORDS, rsp, SET_TPKEY_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status)
+		pr_err("%s: SET_TPKEY error status 0x%x\n", __func__,
+					rsp_in->status);
+
+	// Zero when the HW reported success, the raw status otherwise
+	return rsp_in->status;
+}
+
+/**
+ * NOTE: To anyone maintaining or porting this code wondering why the key
+ * is reversed in the command packet: the plaintext key value is expected by
+ * the HW in reverse byte order.
+ *       See section 1.8.2.2 of the HWKM CPAS for more details
+ *       Mapping of key to CE key read order:
+ *       Key[255:224] -> CRYPTO0_CRYPTO_ENCR_KEY0
+ *       Key[223:192] -> CRYPTO0_CRYPTO_ENCR_KEY1
+ *       ...
+ *       Key[63:32]   -> CRYPTO0_CRYPTO_ENCR_KEY6
+ *       Key[31:0]    -> CRYPTO0_CRYPTO_ENCR_KEY7
+ *       In this notation Key[31:0] is the least significant word of the key
+ *       If the key length is less than 256 bits, the key is filled in from
+ *       higher index to lower
+ *       For example, for a 128 bit key, Key[255:128] would have the key,
+ *       Key[127:0] would be all 0
+ *       This means that CMD[3:6] is all 0, CMD[7:10] has the key value.
+ *
+ * Command packet format (word indices):
+ * CMD[0]    = Operation info (OP, IRQ_EN, DKS/SKS, WE, LEN)
+ * CMD[1:2]  = Policy (0 if we == 0)
+ * CMD[3:10] = Write key value (0 if we == 0)
+ * CMD[11]   = CRC (disabled)
+ *
+ * Response packet format (word indices):
+ * RSP[0]    = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1]    = Error status
+ * RSP[2:3]  = Policy (0 if we == 1)
+ * RSP[4:11] = Read key value (0 if we == 1)
+ **/
+
+/*
+ * qti_handle_keyslot_rdwr() - read or write a key slot on the ICE slave
+ * (opcode KEY_SLOT_RDWR). See the packet-layout note above; keys travel
+ * in reverse byte order on the wire.
+ *
+ * Return: 0 on success; -EINVAL for an oversized key; the transaction
+ * error or HW error status word otherwise.
+ */
+static int qti_handle_keyslot_rdwr(const struct hwkm_cmd *cmd_in,
+				   struct hwkm_rsp *rsp_in)
+{
+	int status = 0;
+	u32 cmd[KEYSLOT_RDWR_CMD_WORDS] = {0};
+	u32 rsp[KEYSLOT_RDWR_RSP_WORDS] = {0};
+	struct hwkm_serialized_policy policy;
+	struct hwkm_operation_info operation = {
+		.op = KEY_SLOT_RDWR,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->rdwr.slot,
+		.op_flag = cmd_in->rdwr.is_write,
+		.len = KEYSLOT_RDWR_CMD_WORDS
+	};
+
+	pr_debug("%s: KEY_SLOT_RDWR start\n", __func__);
+
+	// Bound the caller-supplied key so the copy below cannot overrun
+	// the key-value region CMD[3:10]
+	if (cmd_in->rdwr.is_write && cmd_in->rdwr.sz > RESPONSE_KEY_LENGTH) {
+		pr_err("%s: key too large (%zu)\n", __func__,
+				(size_t)cmd_in->rdwr.sz);
+		return -EINVAL;
+	}
+
+	memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
+
+	if (cmd_in->rdwr.is_write) {
+		serialize_policy(&policy, &cmd_in->rdwr.policy);
+		memcpy(cmd + COMMAND_KEY_POLICY_IDX, &policy,
+				KEY_POLICY_LENGTH);
+		memcpy(cmd + COMMAND_KEY_VALUE_IDX, cmd_in->rdwr.key,
+				cmd_in->rdwr.sz);
+		// Need to reverse the key because the HW expects it in reverse
+		// byte order
+		reverse_key((u8 *) (cmd + COMMAND_KEY_VALUE_IDX),
+				HWKM_MAX_KEY_SIZE);
+	}
+
+	status = qti_hwkm_run_transaction(ICEMEM_SLAVE, cmd,
+			KEYSLOT_RDWR_CMD_WORDS, rsp, KEYSLOT_RDWR_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		goto out;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status) {
+		pr_err("%s: KEY_SLOT_RDWR error status 0x%x\n",
+				__func__, rsp_in->status);
+		status = rsp_in->status;
+		goto out;
+	}
+
+	if (!cmd_in->rdwr.is_write) {
+		memcpy(&policy, rsp + RESPONSE_KEY_POLICY_IDX,
+						KEY_POLICY_LENGTH);
+		memcpy(rsp_in->rdwr.key,
+			rsp + RESPONSE_KEY_VALUE_IDX, RESPONSE_KEY_LENGTH);
+		// Need to reverse the key because the HW returns it in
+		// reverse byte order
+		reverse_key(rsp_in->rdwr.key, HWKM_MAX_KEY_SIZE);
+		deserialize_policy(&rsp_in->rdwr.policy, &policy);
+	}
+
+out:
+	// Scrub stack buffers that may hold plaintext key material on ALL
+	// exit paths; memzero_explicit() cannot be optimized away the way
+	// a dead-store memset() of a dying stack buffer can
+	memzero_explicit(cmd, sizeof(cmd));
+	memzero_explicit(rsp, sizeof(rsp));
+
+	return status;
+}
+
+/*
+ * qti_hwkm_parse_clock_info() - build the HWKM clock list from DT
+ *
+ * Reads "clock-names" and the matching "qcom,op-freq-hz" array and
+ * appends one hwkm_clk_info per clock to hwkm_dev->clk_list_head.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, other negative
+ * values for missing/malformed properties (callers only test non-zero).
+ */
+static int qti_hwkm_parse_clock_info(struct platform_device *pdev,
+				     struct hwkm_device *hwkm_dev)
+{
+	int ret = -1, cnt, i, len;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	char *name;
+	struct hwkm_clk_info *clki;
+	u32 *clkfreq = NULL;
+
+	if (!np)
+		goto out;
+
+	cnt = of_property_count_strings(np, "clock-names");
+	if (cnt <= 0) {
+		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+				__func__);
+		ret = cnt;
+		goto out;
+	}
+
+	if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
+		dev_info(dev, "qcom,op-freq-hz property not specified\n");
+		goto out;
+	}
+
+	// Exactly one frequency entry is expected per clock name
+	len = len/sizeof(*clkfreq);
+	if (len != cnt)
+		goto out;
+
+	// devm_kcalloc also overflow-checks len * sizeof(*clkfreq)
+	clkfreq = devm_kcalloc(dev, len, sizeof(*clkfreq), GFP_KERNEL);
+	if (!clkfreq) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	// Previously this result was ignored: a malformed property would
+	// have silently produced a clock list with zeroed frequencies
+	ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
+	if (ret)
+		goto out;
+
+	INIT_LIST_HEAD(&hwkm_dev->clk_list_head);
+
+	for (i = 0; i < cnt; i++) {
+		ret = of_property_read_string_index(np,
+			"clock-names", i, (const char **)&name);
+		if (ret)
+			goto out;
+
+		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+		if (!clki) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		clki->max_freq = clkfreq[i];
+		// A NULL name from kstrdup() OOM is tolerated: consumers
+		// skip entries with !clki->name
+		clki->name = kstrdup(name, GFP_KERNEL);
+		list_add_tail(&clki->list, &hwkm_dev->clk_list_head);
+	}
+out:
+	return ret;
+}
+
+/*
+ * qti_hwkm_init_clocks() - resolve and rate-set all HWKM clocks
+ *
+ * Looks up each clock named in hwkm_dev->clk_list_head (populated by
+ * qti_hwkm_parse_clock_info()) via devm_clk_get() and programs the
+ * DT-provided max_freq, caching it in curr_freq.
+ *
+ * Return: 0 on success, or immediately when no HWKM clock is available;
+ * -EINVAL for a null/empty clock list; negative errno from clock
+ * lookup or clk_set_rate() failure.
+ */
+static int qti_hwkm_init_clocks(struct hwkm_device *hwkm_dev)
+{
+	int ret = -EINVAL;
+	struct hwkm_clk_info *clki = NULL;
+	struct device *dev = hwkm_dev->dev;
+	struct list_head *head = &hwkm_dev->clk_list_head;
+
+	// Nothing to do when DT did not declare an HWKM clock
+	if (!hwkm_dev->is_hwkm_clk_available)
+		return 0;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s: HWKM clock list null/empty\n", __func__);
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		// Entries whose name failed to allocate are skipped
+		if (!clki->name)
+			continue;
+
+		clki->clk = devm_clk_get(dev, clki->name);
+		if (IS_ERR(clki->clk)) {
+			ret = PTR_ERR(clki->clk);
+			dev_err(dev, "%s: %s clk get failed, %d\n",
+					__func__, clki->name, ret);
+			goto out;
+		}
+
+		ret = 0;
+		if (clki->max_freq) {
+			ret = clk_set_rate(clki->clk, clki->max_freq);
+			if (ret) {
+				dev_err(dev,
+				"%s: %s clk set rate(%dHz) failed, %d\n",
+				__func__, clki->name, clki->max_freq, ret);
+				goto out;
+			}
+			// Cache the programmed rate for later reference
+			clki->curr_freq = clki->max_freq;
+			dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+		}
+	}
+out:
+	return ret;
+}
+
+/*
+ * qti_hwkm_enable_disable_clocks() - gate all HWKM clocks on or off
+ *
+ * On a partial enable failure the clocks already enabled in this call
+ * are now unwound in reverse order (the old code left them running).
+ * The disable path cannot fail (clk_disable_unprepare() returns void).
+ *
+ * Return: 0 on success, -EINVAL for a missing clock list/clock, or the
+ * clk_prepare_enable() error.
+ */
+static int qti_hwkm_enable_disable_clocks(struct hwkm_device *hwkm_dev,
+					  bool enable)
+{
+	int ret = 0;
+	struct hwkm_clk_info *clki = NULL;
+	struct device *dev = hwkm_dev->dev;
+	struct list_head *head = &hwkm_dev->clk_list_head;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s: HWKM clock list null/empty\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!hwkm_dev->is_hwkm_clk_available) {
+		dev_err(dev, "%s: HWKM clock not available\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		if (enable) {
+			ret = clk_prepare_enable(clki->clk);
+			if (ret) {
+				dev_err(dev, "Unable to enable HWKM clock\n");
+				goto undo_enable;
+			}
+		} else {
+			clk_disable_unprepare(clki->clk);
+		}
+	}
+	goto out;
+
+undo_enable:
+	// Walk back from the failed entry, disabling what we enabled
+	list_for_each_entry_continue_reverse(clki, head, list) {
+		if (clki->name)
+			clk_disable_unprepare(clki->clk);
+	}
+out:
+	return ret;
+}
+
+/*
+ * qti_hwkm_clocks() - exported wrapper to gate the HWKM clocks
+ * @on: true to enable, false to disable
+ *
+ * Return: 0 on success, -ENODEV before probe / after remove, or the
+ * underlying clock error.
+ */
+int qti_hwkm_clocks(bool on)
+{
+	int ret;
+
+	// Exported symbol: guard against calls before probe has published
+	// km_device (or after remove cleared it) to avoid a NULL deref
+	if (!km_device)
+		return -ENODEV;
+
+	ret = qti_hwkm_enable_disable_clocks(km_device, on);
+	if (ret) {
+		pr_err("%s:%pK Could not enable/disable clocks\n",
+				__func__, km_device);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qti_hwkm_clocks);
+
+/*
+ * qti_hwkm_get_device_tree_data() - map register banks and parse DT
+ *
+ * Maps the "km_master" and "ice_slave" MEM resources and, when
+ * "qcom,enable-hwkm-clk" is present, parses the clock properties.
+ *
+ * Return: 0 on success, -ENOMEM for missing resources, or the mapping/
+ * parsing error.
+ */
+static int qti_hwkm_get_device_tree_data(struct platform_device *pdev,
+					 struct hwkm_device *hwkm_dev)
+{
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+
+	// Both the KM master and ICE-slave register banks are required
+	hwkm_dev->km_res = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "km_master");
+	hwkm_dev->ice_res = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "ice_slave");
+	if (!hwkm_dev->km_res || !hwkm_dev->ice_res) {
+		pr_err("%s: No memory available for IORESOURCE\n", __func__);
+		return -ENOMEM;
+	}
+
+	// Check each mapping separately: the old code returned
+	// PTR_ERR(km_base) — a valid pointer, not an error code — when
+	// only the ice_base mapping had failed
+	hwkm_dev->km_base = devm_ioremap_resource(dev, hwkm_dev->km_res);
+	if (IS_ERR(hwkm_dev->km_base)) {
+		ret = PTR_ERR(hwkm_dev->km_base);
+		pr_err("%s: Error = %d mapping HWKM memory\n", __func__, ret);
+		goto out;
+	}
+
+	hwkm_dev->ice_base = devm_ioremap_resource(dev, hwkm_dev->ice_res);
+	if (IS_ERR(hwkm_dev->ice_base)) {
+		ret = PTR_ERR(hwkm_dev->ice_base);
+		pr_err("%s: Error = %d mapping HWKM ICE memory\n",
+			__func__, ret);
+		goto out;
+	}
+
+	// Clock scaling support is optional, gated by DT
+	hwkm_dev->is_hwkm_clk_available = of_property_read_bool(
+				dev->of_node, "qcom,enable-hwkm-clk");
+
+	if (hwkm_dev->is_hwkm_clk_available) {
+		ret = qti_hwkm_parse_clock_info(pdev, hwkm_dev);
+		if (ret) {
+			pr_err("%s: qti_hwkm_parse_clock_info failed (%d)\n",
+				__func__, ret);
+			goto out;
+		}
+	}
+
+out:
+	return ret;
+}
+
+// Exported command entry point: dispatch to the per-opcode handler.
+// Unsupported opcodes — including NIST_KEYGEN, KEY_WRAP_EXPORT and
+// QFPROM_KEY_RDWR (HW-initialization-only) — are rejected with -EINVAL.
+int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd, struct hwkm_rsp *rsp)
+{
+	switch (cmd->op) {
+	case SYSTEM_KDF:
+		return qti_handle_system_kdf(cmd, rsp);
+	case KEY_UNWRAP_IMPORT:
+		return qti_handle_key_unwrap_import(cmd, rsp);
+	case KEY_SLOT_CLEAR:
+		return qti_handle_keyslot_clear(cmd, rsp);
+	case KEY_SLOT_RDWR:
+		return qti_handle_keyslot_rdwr(cmd, rsp);
+	case SET_TPKEY:
+		return qti_handle_set_tpkey(cmd, rsp);
+	default:
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL(qti_hwkm_handle_cmd);
+
+// Open every key slot to HLOS by writing all-ones to each bank-0
+// access-control (BBAC) register, in register order.
+static void qti_hwkm_configure_slot_access(struct hwkm_device *dev)
+{
+	static const int bbac_regs[] = {
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_0,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_1,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_2,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_3,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_4,
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(bbac_regs) / sizeof(bbac_regs[0]); i++)
+		qti_hwkm_writel(dev, 0xffffffff, bbac_regs[i], ICEMEM_SLAVE);
+}
+
+/*
+ * qti_hwkm_check_bist_status() - verify all HW self-test done bits
+ *
+ * Table-driven rewrite of five identical checks; also restores the
+ * "Error with" prefix the KT_CLEAR_DONE message was missing.
+ *
+ * Return: 0 when every status bit is set, -EINVAL on the first miss.
+ */
+static int qti_hwkm_check_bist_status(struct hwkm_device *hwkm_dev)
+{
+	static const struct {
+		int bit;
+		const char *name;
+	} checks[] = {
+		{ BIST_DONE,		"BIST_DONE" },
+		{ CRYPTO_LIB_BIST_DONE,	"CRYPTO_LIB_BIST_DONE" },
+		{ BOOT_CMD_LIST1_DONE,	"BOOT_CMD_LIST1_DONE" },
+		{ BOOT_CMD_LIST0_DONE,	"BOOT_CMD_LIST0_DONE" },
+		{ KT_CLEAR_DONE,	"KT_CLEAR_DONE" },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(checks) / sizeof(checks[0]); i++) {
+		if (!qti_hwkm_testb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
+				checks[i].bit, ICEMEM_SLAVE)) {
+			pr_err("%s: Error with %s\n", __func__,
+					checks[i].name);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * qti_hwkm_ice_init_sequence() - one-time HW init of the ICE-slave bank
+ *
+ * Puts ICE in standard mode, verifies the self-test (BIST) status bits,
+ * disables CRC checks on command packets, opens all key slots to HLOS,
+ * and clears a potentially stale RSP_FIFO_FULL indication. Step order
+ * follows the HW bring-up sequence and must not be rearranged.
+ *
+ * Return: 0 on success, negative errno if the BIST check fails.
+ */
+static int qti_hwkm_ice_init_sequence(struct hwkm_device *hwkm_dev)
+{
+	int ret = 0;
+
+	// Put ICE in standard mode
+	// NOTE(review): 0x7 encodes the mode/control bits; exact bit
+	// meanings are not visible here — confirm against the register spec
+	qti_hwkm_writel(hwkm_dev, 0x7, QTI_HWKM_ICE_RG_TZ_KM_CTL, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	ret = qti_hwkm_check_bist_status(hwkm_dev);
+	if (ret) {
+		pr_err("%s: Error in BIST initialization %d\n", __func__, ret);
+		return ret;
+	}
+
+	// Disable CRC checks
+	qti_hwkm_clearb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_KM_CTL, CRC_CHECK_EN,
+			ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	// Configure key slots to be accessed by HLOS
+	qti_hwkm_configure_slot_access(hwkm_dev);
+	/* Write memory barrier */
+	wmb();
+
+	// Clear RSP_FIFO_FULL bit (write-1-to-clear via setb)
+	qti_hwkm_setb(hwkm_dev, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
+			RSP_FIFO_FULL, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	return ret;
+}
+
+/*
+ * qti_hwkm_enable_slave_receive_mode() - prepare the ICE slave to accept
+ * the transport key pushed by the KM master (SET_TPKEY flow).
+ *
+ * NOTE(review): the code clears TPKEY_EN and then writes
+ * ICEMEM_SLAVE_TPKEY_VAL to the same receive-control register;
+ * presumably the value (re)arms reception — confirm against the SWI.
+ */
+static void qti_hwkm_enable_slave_receive_mode(struct hwkm_device *hwkm_dev)
+{
+	qti_hwkm_clearb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL,
+			TPKEY_EN, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+	qti_hwkm_writel(hwkm_dev, ICEMEM_SLAVE_TPKEY_VAL,
+			QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+}
+
+/*
+ * qti_hwkm_disable_slave_receive_mode() - stop the ICE slave from
+ * accepting transport-key pushes by clearing TPKEY_EN.
+ */
+static void qti_hwkm_disable_slave_receive_mode(struct hwkm_device *hwkm_dev)
+{
+	qti_hwkm_clearb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL,
+			TPKEY_EN, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+}
+
+// Read back the TPKEY receive status purely for debug logging;
+// the value is not acted upon.
+static void qti_hwkm_check_tpkey_status(struct hwkm_device *hwkm_dev)
+{
+	int status;
+
+	status = qti_hwkm_readl(hwkm_dev,
+			QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS,
+			ICEMEM_SLAVE);
+
+	pr_debug("%s: Tpkey receive status 0x%x\n", __func__, status);
+}
+
+/*
+ * qti_hwkm_set_tpkey() - push the transport key from the KM master's
+ * TPKEY slot to the ICE slave.
+ *
+ * Fixes a pr_err() format/argument mismatch (ret was passed with no
+ * conversion specifier) and now disables slave receive mode on the
+ * error path as well, instead of leaving it armed.
+ *
+ * Return: 0 on success, the command error otherwise.
+ */
+static int qti_hwkm_set_tpkey(void)
+{
+	int ret = 0;
+	struct hwkm_cmd cmd;
+	struct hwkm_rsp rsp;
+
+	cmd.op = SET_TPKEY;
+	cmd.set_tpkey.sks = KM_MASTER_TPKEY_SLOT;
+
+	qti_hwkm_enable_slave_receive_mode(km_device);
+	ret = qti_hwkm_handle_cmd(&cmd, &rsp);
+	if (ret) {
+		pr_err("%s: Error running commands, err %d\n", __func__, ret);
+		qti_hwkm_disable_slave_receive_mode(km_device);
+		return ret;
+	}
+
+	qti_hwkm_check_tpkey_status(km_device);
+	qti_hwkm_disable_slave_receive_mode(km_device);
+
+	return 0;
+}
+
+/*
+ * qti_hwkm_init() - bring up the HWKM ICE slave and distribute the TPKEY
+ *
+ * Runs the ICE-slave init sequence, then has the KM master push the
+ * transport key to the slave. Exported so other drivers can initialize
+ * HWKM before issuing commands.
+ *
+ * Return: 0 on success, the error from either init step otherwise.
+ */
+int qti_hwkm_init(void)
+{
+	int ret = 0;
+
+	ret = qti_hwkm_ice_init_sequence(km_device);
+	if (ret) {
+		pr_err("%s: Error in ICE init sequence %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = qti_hwkm_set_tpkey();
+	if (ret) {
+		pr_err("%s: Error setting ICE to receive %d\n", __func__, ret);
+		return ret;
+	}
+	/* Write memory barrier */
+	wmb();
+	return ret;
+}
+EXPORT_SYMBOL(qti_hwkm_init);
+
+/*
+ * qti_hwkm_probe() - allocate driver state, map registers, init clocks
+ *
+ * On success the device is published through the file-scope km_device
+ * pointer used by the exported qti_hwkm_* entry points.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int qti_hwkm_probe(struct platform_device *pdev)
+{
+	struct hwkm_device *hwkm_dev;
+	int ret = 0;
+
+	pr_debug("%s %d: HWKM probe start\n", __func__, __LINE__);
+	if (!pdev) {
+		pr_err("%s: Invalid platform_device passed\n", __func__);
+		return -EINVAL;
+	}
+
+	hwkm_dev = kzalloc(sizeof(struct hwkm_device), GFP_KERNEL);
+	if (!hwkm_dev) {
+		ret = -ENOMEM;
+		pr_err("%s: Error %d allocating memory for HWKM device\n",
+			__func__, ret);
+		goto err_hwkm_dev;
+	}
+
+	// &pdev->dev can never be NULL, so the old post-assignment NULL
+	// check was dead code and has been dropped
+	hwkm_dev->dev = &pdev->dev;
+
+	if (pdev->dev.of_node)
+		ret = qti_hwkm_get_device_tree_data(pdev, hwkm_dev);
+	else {
+		ret = -EINVAL;
+		pr_err("%s: HWKM device node not found\n", __func__);
+	}
+	if (ret)
+		goto err_hwkm_dev;
+
+	ret = qti_hwkm_init_clocks(hwkm_dev);
+	if (ret) {
+		pr_err("%s: Error initializing clocks %d\n", __func__, ret);
+		goto err_hwkm_dev;
+	}
+
+	hwkm_dev->is_hwkm_enabled = true;
+	// Publish the device only after it is fully initialized
+	km_device = hwkm_dev;
+	platform_set_drvdata(pdev, hwkm_dev);
+
+	pr_debug("%s %d: HWKM probe ends\n", __func__, __LINE__);
+	return ret;
+
+err_hwkm_dev:
+	km_device = NULL;
+	kfree(hwkm_dev);
+	return ret;
+}
+
+
+static int qti_hwkm_remove(struct platform_device *pdev)
+{
+	// Reset the global handle so the exported qti_hwkm_* entry points
+	// cannot keep dereferencing a dangling pointer after unbind
+	kfree(km_device);
+	km_device = NULL;
+	return 0;
+}
+
+// Device-tree match table: binds this driver to "qcom,hwkm" nodes
+static const struct of_device_id qti_hwkm_match[] = {
+	{ .compatible = "qcom,hwkm"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, qti_hwkm_match);
+
+// Platform-driver glue; registration handled by module_platform_driver()
+static struct platform_driver qti_hwkm_driver = {
+	.probe		= qti_hwkm_probe,
+	.remove		= qti_hwkm_remove,
+	.driver		= {
+		.name	= "qti_hwkm",
+		.of_match_table	= qti_hwkm_match,
+	},
+};
+module_platform_driver(qti_hwkm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Hardware Key Manager driver");
diff --git a/drivers/soc/qcom/hwkm_serialize.h b/drivers/soc/qcom/hwkm_serialize.h
new file mode 100644
index 0000000..2d73ff5
--- /dev/null
+++ b/drivers/soc/qcom/hwkm_serialize.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __HWKM_SERIALIZE_H_
+#define __HWKM_SERIALIZE_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include <linux/hwkm.h>
+
+/* Command lengths (words) */
+#define NIST_KEYGEN_CMD_WORDS 4
+#define SYSTEM_KDF_CMD_MIN_WORDS 4
+#define SYSTEM_KDF_CMD_MAX_WORDS 29
+#define KEYSLOT_CLEAR_CMD_WORDS 2
+#define UNWRAP_IMPORT_CMD_WORDS 19
+#define WRAP_EXPORT_CMD_WORDS 5
+#define SET_TPKEY_CMD_WORDS 2
+#define KEYSLOT_RDWR_CMD_WORDS 12
+#define QFPROM_RDWR_CMD_WORDS 2
+
+/* Response lengths (words) */
+#define NIST_KEYGEN_RSP_WORDS 2
+#define SYSTEM_KDF_RSP_WORDS 2
+#define KEYSLOT_CLEAR_RSP_WORDS 2
+#define UNWRAP_IMPORT_RSP_WORDS 2
+#define WRAP_EXPORT_RSP_WORDS 19
+#define SET_TPKEY_RSP_WORDS 2
+#define KEYSLOT_RDWR_RSP_WORDS 12
+#define QFPROM_RDWR_RSP_WORDS 2
+
+/* Field lengths (words) */
+#define OPERATION_INFO_WORDS 1
+#define KEY_POLICY_WORDS 2
+#define BSVE_WORDS 3
+#define MAX_SWC_WORDS 16
+#define RESPONSE_KEY_WORDS 8
+#define KEY_BLOB_WORDS 17
+
+/* Field lengths (bytes) */
+#define OPERATION_INFO_LENGTH (OPERATION_INFO_WORDS * sizeof(uint32_t))
+#define KEY_POLICY_LENGTH (KEY_POLICY_WORDS * sizeof(uint32_t))
+#define MAX_BSVE_LENGTH (BSVE_WORDS * sizeof(uint32_t))
+#define MAX_SWC_LENGTH (MAX_SWC_WORDS * sizeof(uint32_t))
+#define RESPONSE_KEY_LENGTH (RESPONSE_KEY_WORDS * sizeof(uint32_t))
+#define KEY_BLOB_LENGTH (KEY_BLOB_WORDS * sizeof(uint32_t))
+
+/* Command indices */
+#define COMMAND_KEY_POLICY_IDX 1
+#define COMMAND_KEY_VALUE_IDX 3
+#define COMMAND_WRAPPED_KEY_IDX 1
+#define COMMAND_KEY_WRAP_BSVE_IDX 1
+
+/* Response indices */
+#define RESPONSE_ERR_IDX 1
+#define RESPONSE_KEY_POLICY_IDX 2
+#define RESPONSE_KEY_VALUE_IDX 4
+#define RESPONSE_WRAPPED_KEY_IDX 2
+
+// Wire format (two 32-bit words, 64 bits total) of a key policy as it is
+// carried in HWKM command/response packets; converted to/from the logical
+// struct hwkm_key_policy by serialize_policy()/deserialize_policy().
+struct hwkm_serialized_policy {
+	unsigned dbg_qfprom_key_rd_iv_sel:1;		// [0]
+	unsigned reserved0:1;				// [1]
+	unsigned wrap_with_tpkey:1;			// [2]
+	unsigned hw_destination:4;			// [3:6]
+	unsigned reserved1:1;				// [7]
+	unsigned propagate_sec_level_to_child_keys:1;	// [8]
+	unsigned security_level:2;			// [9:10]
+	unsigned swap_export_allowed:1;			// [11]
+	unsigned wrap_export_allowed:1;			// [12]
+	unsigned key_type:3;				// [13:15]
+	unsigned kdf_depth:8;				// [16:23]
+	unsigned decrypt_allowed:1;			// [24]
+	unsigned encrypt_allowed:1;			// [25]
+	unsigned alg_allowed:6;				// [26:31]
+	unsigned key_management_by_tz_secure_allowed:1;	// [32]
+	unsigned key_management_by_nonsecure_allowed:1;	// [33]
+	unsigned key_management_by_modem_allowed:1;	// [34]
+	unsigned key_management_by_spu_allowed:1;	// [35]
+	unsigned reserved2:28;				// [36:63]
+} __packed;
+
+// BSVE layout embedded in SYSTEM_KDF command packets (see
+// serialize_kdf_bsve()). Bit-range comments corrected: the 64-bit
+// digest field starting at bit 14 spans [14:77] (14 + 64 - 1 = 77),
+// so the following fields sit at 78/79 and reserved is [80:95].
+struct hwkm_kdf_bsve {
+	unsigned mks:8;				// [0:7]
+	unsigned key_policy_version_en:1;	// [8]
+	unsigned apps_secure_en:1;		// [9]
+	unsigned msa_secure_en:1;		// [10]
+	unsigned lcm_fuse_row_en:1;		// [11]
+	unsigned boot_stage_otp_en:1;		// [12]
+	unsigned swc_en:1;			// [13]
+	u64 fuse_region_sha_digest_en:64;	// [14:77]
+	unsigned child_key_policy_en:1;		// [78]
+	unsigned mks_en:1;			// [79]
+	unsigned reserved:16;			// [80:95]
+} __packed;
+
+// BSVE layout used by wrap/export command packets.
+// NOTE(review): a 64-bit bitfield packed against smaller `unsigned`
+// fields relies on the compiler laying bitfields out contiguously under
+// __packed — confirm the layout matches the HW packet format on all
+// supported toolchains.
+struct hwkm_wrapping_bsve {
+	unsigned key_policy_version_en:1;      // [0]
+	unsigned apps_secure_en:1;             // [1]
+	unsigned msa_secure_en:1;              // [2]
+	unsigned lcm_fuse_row_en:1;            // [3]
+	unsigned boot_stage_otp_en:1;          // [4]
+	unsigned swc_en:1;                     // [5]
+	u64 fuse_region_sha_digest_en:64; // [6:69]
+	unsigned child_key_policy_en:1;        // [70]
+	unsigned mks_en:1;                     // [71]
+	unsigned reserved:24;                  // [72:95]
+} __packed;
+
+// One-word operation/header field placed at CMD[0] of every HWKM
+// command packet (bit-range comment style normalized to [lo:hi]).
+struct hwkm_operation_info {
+	unsigned op:4;		// [0:3] opcode
+	unsigned irq_en:1;	// [4] interrupt enable
+	unsigned slot1_desc:8;	// [5:12] first/destination key slot
+	unsigned slot2_desc:8;	// [13:20] second key slot (e.g. KWK/KDK)
+	unsigned op_flag:1;	// [21] operation-specific flag (WE/BE/...)
+	unsigned context_len:5;	// [22:26] software context length, words
+	unsigned len:5;		// [27:31] command length, words
+} __packed;
+
+#endif /* __HWKM_SERIALIZE_H_ */
diff --git a/drivers/soc/qcom/hwkmregs.h b/drivers/soc/qcom/hwkmregs.h
new file mode 100644
index 0000000..552e489
--- /dev/null
+++ b/drivers/soc/qcom/hwkmregs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _QTI_HARDWARE_KEY_MANAGER_REGS_H_
+#define _QTI_HARDWARE_KEY_MANAGER_REGS_H_
+
+#define HWKM_VERSION_STEP_REV_MASK		0xFFFF
+#define HWKM_VERSION_STEP_REV			0 /* bit 15-0 */
+#define HWKM_VERSION_MAJOR_REV_MASK		0xFF000000
+#define HWKM_VERSION_MAJOR_REV			24 /* bit 31-24 */
+#define HWKM_VERSION_MINOR_REV_MASK		0xFF0000
+#define HWKM_VERSION_MINOR_REV			16 /* bit 23-16 */
+
+/* QTI HWKM master registers from SWI */
+/* QTI HWKM master shared registers */
+#define QTI_HWKM_MASTER_RG_IPCAT_VERSION		0x0000
+#define QTI_HWKM_MASTER_RG_KEY_POLICY_VERSION		0x0004
+#define QTI_HWKM_MASTER_RG_SHARED_STATUS		0x0008
+#define QTI_HWKM_MASTER_RG_KEYTABLE_SIZE		0x000C
+
+/* QTI HWKM master register bank 2 */
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL		0x4000
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS		0x4004
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS	0x4008
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_MASK		0x400C
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_ESR		0x4010
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_ESR_IRQ_MASK	0x4014
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_ESYNR		0x4018
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_0			0x401C
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_1			0x4020
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_2			0x4024
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_3			0x4028
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_4			0x402C
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_5			0x4030
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_6			0x4034
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_7			0x4038
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_8			0x403C
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_9			0x4040
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_10			0x4044
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_11			0x4048
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_12			0x404C
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_13			0x4050
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_14			0x4054
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_15			0x4058
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_0			0x405C
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_1			0x4060
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_2			0x4064
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_3			0x4068
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_4			0x406C
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_5			0x4070
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_6			0x4074
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_7			0x4078
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_8			0x407C
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_9			0x4080
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_10			0x4084
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_11			0x4088
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_12			0x408C
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_13			0x4090
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_14			0x4094
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_15			0x4098
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_ROUTING	0x409C
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_0		0x40A0
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_1		0x40A4
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_2		0x40A8
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_3		0x40AC
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_4		0x40B0
+
+/* QTI HWKM master register bank 3 */
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_CTL		0x5000
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_STATUS		0x5004
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_IRQ_STATUS	0x5008
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_IRQ_MASK		0x500C
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_ESR		0x5010
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_ESR_IRQ_MASK	0x5014
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_ESYNR		0x5018
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_0			0x501C
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_1			0x5020
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_2			0x5024
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_3			0x5028
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_4			0x502C
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_5			0x5030
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_6			0x5034
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_7			0x5038
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_8			0x503C
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_9			0x5040
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_10			0x5044
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_11			0x5048
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_12			0x504C
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_13			0x5050
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_14			0x5054
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_15			0x5058
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_0			0x505C
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_1			0x5060
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_2			0x5064
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_3			0x5068
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_4			0x506C
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_5			0x5070
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_6			0x5074
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_7			0x5078
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_8			0x507C
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_9			0x5080
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_10			0x5084
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_11			0x5088
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_12			0x508C
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_13			0x5090
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_14			0x5094
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_15			0x5098
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_IRQ_ROUTING	0x509C
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_0		0x50A0
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_1		0x50A4
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_2		0x50A8
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_3		0x50AC
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_4		0x50B0
+
+/* QTI HWKM access control registers for Bank 2 */
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_0	0x8000
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_1	0x8004
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_2	0x8008
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_3	0x800C
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_4	0x8010
+
+/* QTI HWKM access control registers for Bank 3 */
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_0	0x9000
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_1	0x9004
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_2	0x9008
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_3	0x900C
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_4	0x9010
+
+/* QTI HWKM ICE slave config and status registers */
+#define QTI_HWKM_ICE_RG_TZ_KM_CTL			0x1000
+#define QTI_HWKM_ICE_RG_TZ_KM_STATUS			0x1004
+#define QTI_HWKM_ICE_RG_TZ_KM_STATUS_IRQ_MASK		0x1008
+#define QTI_HWKM_ICE_RG_TZ_KM_BOOT_STAGE_OTP		0x100C
+#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_CTL			0x1010
+#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_WRITE		0x1014
+#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_READ		0x1018
+#define QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL		0x101C
+#define QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS		0x1020
+#define QTI_HWKM_ICE_RG_TZ_KM_COMMON_IRQ_ROUTING	0x1024
+
+/* QTI HWKM ICE slave registers from SWI */
+/* QTI HWKM ICE slave shared registers */
+#define QTI_HWKM_ICE_RG_IPCAT_VERSION			0x0000
+#define QTI_HWKM_ICE_RG_KEY_POLICY_VERSION		0x0004
+#define QTI_HWKM_ICE_RG_SHARED_STATUS			0x0008
+#define QTI_HWKM_ICE_RG_KEYTABLE_SIZE			0x000C
+
+/* QTI HWKM ICE slave register bank 0 */
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_CTL			0x2000
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS		0x2004
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS		0x2008
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_MASK		0x200C
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESR			0x2010
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESR_IRQ_MASK	0x2014
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESYNR		0x2018
+#define QTI_HWKM_ICE_RG_BANK0_CMD_0			0x201C
+#define QTI_HWKM_ICE_RG_BANK0_CMD_1			0x2020
+#define QTI_HWKM_ICE_RG_BANK0_CMD_2			0x2024
+#define QTI_HWKM_ICE_RG_BANK0_CMD_3			0x2028
+#define QTI_HWKM_ICE_RG_BANK0_CMD_4			0x202C
+#define QTI_HWKM_ICE_RG_BANK0_CMD_5			0x2030
+#define QTI_HWKM_ICE_RG_BANK0_CMD_6			0x2034
+#define QTI_HWKM_ICE_RG_BANK0_CMD_7			0x2038
+#define QTI_HWKM_ICE_RG_BANK0_CMD_8			0x203C
+#define QTI_HWKM_ICE_RG_BANK0_CMD_9			0x2040
+#define QTI_HWKM_ICE_RG_BANK0_CMD_10			0x2044
+#define QTI_HWKM_ICE_RG_BANK0_CMD_11			0x2048
+#define QTI_HWKM_ICE_RG_BANK0_CMD_12			0x204C
+#define QTI_HWKM_ICE_RG_BANK0_CMD_13			0x2050
+#define QTI_HWKM_ICE_RG_BANK0_CMD_14			0x2054
+#define QTI_HWKM_ICE_RG_BANK0_CMD_15			0x2058
+#define QTI_HWKM_ICE_RG_BANK0_RSP_0			0x205C
+#define QTI_HWKM_ICE_RG_BANK0_RSP_1			0x2060
+#define QTI_HWKM_ICE_RG_BANK0_RSP_2			0x2064
+#define QTI_HWKM_ICE_RG_BANK0_RSP_3			0x2068
+#define QTI_HWKM_ICE_RG_BANK0_RSP_4			0x206C
+#define QTI_HWKM_ICE_RG_BANK0_RSP_5			0x2070
+#define QTI_HWKM_ICE_RG_BANK0_RSP_6			0x2074
+#define QTI_HWKM_ICE_RG_BANK0_RSP_7			0x2078
+#define QTI_HWKM_ICE_RG_BANK0_RSP_8			0x207C
+#define QTI_HWKM_ICE_RG_BANK0_RSP_9			0x2080
+#define QTI_HWKM_ICE_RG_BANK0_RSP_10			0x2084
+#define QTI_HWKM_ICE_RG_BANK0_RSP_11			0x2088
+#define QTI_HWKM_ICE_RG_BANK0_RSP_12			0x208C
+#define QTI_HWKM_ICE_RG_BANK0_RSP_13			0x2090
+#define QTI_HWKM_ICE_RG_BANK0_RSP_14			0x2094
+#define QTI_HWKM_ICE_RG_BANK0_RSP_15			0x2098
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_ROUTING		0x209C
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_0		0x20A0
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_1		0x20A4
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_2		0x20A8
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_3		0x20AC
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_4		0x20B0
+
+/* QTI HWKM access control registers for Bank 0 */
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_0		0x5000
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_1		0x5004
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_2		0x5008
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_3		0x500C
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_4		0x5010
+
+
+/* QTI HWKM ICE slave config reg vals */
+
+/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_KM_CTL */
+#define CRC_CHECK_EN				0
+#define KEYTABLE_HW_WR_ACCESS_EN		1
+#define KEYTABLE_HW_RD_ACCESS_EN		2
+#define BOOT_INIT0_DISABLE			3
+#define BOOT_INIT1_DISABLE			4
+#define ICE_LEGACY_MODE_EN_OTP			5
+
+/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_KM_STATUS */
+#define KT_CLEAR_DONE				0
+#define BOOT_CMD_LIST0_DONE			1
+#define BOOT_CMD_LIST1_DONE			2
+#define KEYTABLE_KEY_POLICY			3
+#define KEYTABLE_INTEGRITY_ERROR		4
+#define KEYTABLE_KEY_SLOT_ERROR			5
+#define KEYTABLE_KEY_SLOT_NOT_EVEN_ERROR	6
+#define KEYTABLE_KEY_SLOT_OUT_OF_RANGE		7
+#define KEYTABLE_KEY_SIZE_ERROR			8
+#define KEYTABLE_OPERATION_ERROR		9
+#define LAST_ACTIVITY_BANK			10
+#define CRYPTO_LIB_BIST_ERROR			13
+#define CRYPTO_LIB_BIST_DONE			14
+#define BIST_ERROR				15
+#define BIST_DONE				16
+#define LAST_ACTIVITY_BANK_MASK			0x1c00
+
+/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_TPKEY_RECEIVE_CTL */
+#define TPKEY_EN				8
+
+/* QTI HWKM Bank status & control reg vals */
+
+/* HWKM_MASTER_CFG_KM_BANKN_CTL */
+#define CMD_ENABLE_BIT				0
+#define CMD_FIFO_CLEAR_BIT			1
+
+/* HWKM_MASTER_CFG_KM_BANKN_STATUS */
+#define CURRENT_CMD_REMAINING_LENGTH		0
+#define MOST_RECENT_OPCODE			5
+#define RSP_FIFO_AVAILABLE_DATA			9
+#define CMD_FIFO_AVAILABLE_SPACE		14
+#define ICE_LEGACY_MODE_BIT			19
+#define CMD_FIFO_AVAILABLE_SPACE_MASK		0x7c000
+#define RSP_FIFO_AVAILABLE_DATA_MASK		0x3e00
+#define MOST_RECENT_OPCODE_MASK			0x1e0
+#define CURRENT_CMD_REMAINING_LENGTH_MASK	0x1f
+
+/* HWKM_MASTER_CFG_KM_BANKN_IRQ_STATUS */
+#define ARB_GRAN_WINNER				0
+#define CMD_DONE_BIT				1
+#define RSP_FIFO_NOT_EMPTY			2
+#define RSP_FIFO_FULL				3
+#define RSP_FIFO_UNDERFLOW			4
+#define CMD_FIFO_UNDERFLOW			5
+
+#endif /* _QTI_HARDWARE_KEY_MANAGER_REGS_H_ */
diff --git a/drivers/soc/qcom/l2_reuse.c b/drivers/soc/qcom/l2_reuse.c
index e20b49a..bd96d38 100644
--- a/drivers/soc/qcom/l2_reuse.c
+++ b/drivers/soc/qcom/l2_reuse.c
@@ -13,7 +13,7 @@
 
 #define L2_REUSE_SMC_ID 0x00200090C
 
-static bool l2_reuse_enable = true;
+static bool l2_reuse_enable;
 static struct kobject *l2_reuse_kobj;
 
 static ssize_t sysfs_show(struct kobject *kobj,
@@ -38,12 +38,12 @@ static ssize_t sysfs_store(struct kobject *kobj,
 	return count;
 }
 
-struct kobj_attribute l2_reuse_attr = __ATTR(l2_reuse_enable, 0660,
+struct kobj_attribute l2_reuse_attr = __ATTR(extended_cache_enable, 0660,
 		sysfs_show, sysfs_store);
 
 static int __init l2_reuse_driver_init(void)
 {
-	l2_reuse_kobj = kobject_create_and_add("l2_reuse_enable", power_kobj);
+	l2_reuse_kobj = kobject_create_and_add("l2_reuse", power_kobj);
 
 	if (!l2_reuse_kobj) {
 		pr_info("kobj creation for l2_reuse failed\n");
diff --git a/drivers/soc/qcom/msm-spm.c b/drivers/soc/qcom/msm-spm.c
new file mode 100644
index 0000000..357c0d3
--- /dev/null
+++ b/drivers/soc/qcom/msm-spm.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2011-2017, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "spm_driver.h"
+
+#define MSM_SPM_PMIC_STATE_IDLE  0
+
+enum {
+	MSM_SPM_DEBUG_SHADOW = 1U << 0,
+	MSM_SPM_DEBUG_VCTL = 1U << 1,
+};
+
+static int msm_spm_debug_mask;
+module_param_named(
+	debug_mask, msm_spm_debug_mask, int, 0664
+);
+
+struct saw2_data {
+	const char *ver_name;
+	uint32_t major;
+	uint32_t minor;
+	uint32_t *spm_reg_offset_ptr;
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v2_1[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0x00,
+	[MSM_SPM_REG_SAW_ID]			= 0x04,
+	[MSM_SPM_REG_SAW_CFG]			= 0x08,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0x0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0x10,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0x14,
+	[MSM_SPM_REG_SAW_RST]			= 0x18,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x1C,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x20,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x24,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x28,
+	[MSM_SPM_REG_SAW_AVS_HYSTERESIS]	= 0x2C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x30,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x34,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_PMIC_DATA_6]		= 0x58,
+	[MSM_SPM_REG_SAW_PMIC_DATA_7]		= 0x5C,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x80,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v3_0[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0x00,
+	[MSM_SPM_REG_SAW_ID]			= 0x04,
+	[MSM_SPM_REG_SAW_CFG]			= 0x08,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0x0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0x10,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0x14,
+	[MSM_SPM_REG_SAW_RST]			= 0x18,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x1C,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x20,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x24,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x28,
+	[MSM_SPM_REG_SAW_AVS_HYSTERESIS]	= 0x2C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x30,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x34,
+	[MSM_SPM_REG_SAW_STS2]			= 0x38,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_PMIC_DATA_6]		= 0x58,
+	[MSM_SPM_REG_SAW_PMIC_DATA_7]		= 0x5C,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x400,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v4_1[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0xC00,
+	[MSM_SPM_REG_SAW_ID]			= 0xC04,
+	[MSM_SPM_REG_SAW_STS2]			= 0xC10,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0xC0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0xC14,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0xC18,
+	[MSM_SPM_REG_SAW_RST]			= 0xC1C,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x900,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x904,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x908,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x90C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x0,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x4,
+	[MSM_SPM_REG_SAW_CFG]			= 0x0C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x400,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static struct saw2_data saw2_info[] = {
+	[0] = {
+		"SAW_v2.1",
+		0x2,
+		0x1,
+		msm_spm_reg_offsets_saw2_v2_1,
+	},
+	[1] = {
+		"SAW_v2.3",
+		0x3,
+		0x0,
+		msm_spm_reg_offsets_saw2_v3_0,
+	},
+	[2] = {
+		"SAW_v3.0",
+		0x1,
+		0x0,
+		msm_spm_reg_offsets_saw2_v3_0,
+	},
+	[3] = {
+		"SAW_v4.0",
+		0x4,
+		0x1,
+		msm_spm_reg_offsets_saw2_v4_1,
+	},
+};
+
+static uint32_t num_pmic_data;
+
+static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev,
+		unsigned int reg_index)
+{
+	if (!dev)
+		return;
+
+	__raw_writel(dev->reg_shadow[reg_index],
+		dev->reg_base_addr + dev->reg_offsets[reg_index]);
+}
+
+static void msm_spm_drv_load_shadow(struct msm_spm_driver_data *dev,
+		unsigned int reg_index)
+{
+	dev->reg_shadow[reg_index] =
+		__raw_readl(dev->reg_base_addr +
+				dev->reg_offsets[reg_index]);
+}
+
+static inline uint32_t msm_spm_drv_get_num_spm_entry(
+		struct msm_spm_driver_data *dev)
+{
+	if (!dev)
+		return -ENODEV;
+
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 24) & 0xFF;
+}
+
+static inline void msm_spm_drv_set_start_addr(
+		struct msm_spm_driver_data *dev, uint32_t ctl)
+{
+	dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] = ctl;
+}
+
+static inline bool msm_spm_pmic_arb_present(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 2) & 0x1;
+}
+
+static inline void msm_spm_drv_set_vctl2(struct msm_spm_driver_data *dev,
+				uint32_t vlevel, uint32_t vctl_port)
+{
+	unsigned int pmic_data = 0;
+
+	pmic_data |= vlevel;
+	pmic_data |= (vctl_port & 0x7) << 16;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] |= pmic_data;
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_PMIC_DATA_3);
+}
+
+static inline uint32_t msm_spm_drv_get_num_pmic_data(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	mb(); /* Ensure we flush */
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 4) & 0x7;
+}
+
+static inline uint32_t msm_spm_drv_get_sts_pmic_state(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] >> 16) &
+				0x03;
+}
+
+uint32_t msm_spm_drv_get_sts_curr_pmic_data(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0x300FF;
+}
+
+static inline void msm_spm_drv_get_saw2_ver(struct msm_spm_driver_data *dev,
+		uint32_t *major, uint32_t *minor)
+{
+	uint32_t val = 0;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VERSION] =
+			__raw_readl(dev->reg_base_addr + dev->ver_reg);
+
+	val = dev->reg_shadow[MSM_SPM_REG_SAW_VERSION];
+
+	*major = (val >> 28) & 0xF;
+	*minor = (val >> 16) & 0xFFF;
+}
+
+inline int msm_spm_drv_set_spm_enable(
+		struct msm_spm_driver_data *dev, bool enable)
+{
+	uint32_t value = enable ? 0x01 : 0x00;
+
+	if (!dev)
+		return -EINVAL;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] & 0x01) ^ value) {
+
+		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] &= ~0x1;
+		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= value;
+
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+		wmb(); /* Ensure we flush */
+	}
+	return 0;
+}
+
+int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev)
+{
+	if (!dev)
+		return -EINVAL;
+
+	return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x01;
+}
+
+int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev,
+		 bool enable)
+{
+	uint32_t value = enable ? 0x1 : 0x0;
+
+	if (!dev)
+		return -EINVAL;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x1) ^ value) {
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x1;
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value;
+
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev,
+		uint32_t min_lvl, uint32_t max_lvl)
+{
+	uint32_t value = (max_lvl & 0xff) << 16 | (min_lvl & 0xff);
+
+	if (!dev)
+		return -EINVAL;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_LIMIT] = value;
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_LIMIT);
+
+	return 0;
+}
+
+static int msm_spm_drv_avs_irq_mask(enum msm_spm_avs_irq irq)
+{
+	switch (irq) {
+	case MSM_SPM_AVS_IRQ_MIN:
+		return BIT(1);
+	case MSM_SPM_AVS_IRQ_MAX:
+		return BIT(2);
+	default:
+		return -EINVAL;
+	}
+}
+
+int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq, bool enable)
+{
+	int mask = msm_spm_drv_avs_irq_mask(irq);
+	uint32_t value;
+
+	if (!dev)
+		return -EINVAL;
+	else if (mask < 0)
+		return mask;
+
+	value = enable ? mask : 0;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) ^ value) {
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask;
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq)
+{
+	int mask = msm_spm_drv_avs_irq_mask(irq);
+
+	if (!dev)
+		return -EINVAL;
+	else if (mask < 0)
+		return mask;
+
+	if (dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) {
+		/*
+		 * The interrupt status is cleared by disabling and then
+		 * re-enabling the interrupt.
+		 */
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= mask;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev)
+{
+	int i;
+	int num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);
+
+	if (!dev) {
+		__WARN();
+		return;
+	}
+
+	for (i = 0; i < num_spm_entry; i++) {
+		__raw_writel(dev->reg_seq_entry_shadow[i],
+			dev->reg_base_addr
+			+ dev->reg_offsets[MSM_SPM_REG_SAW_SEQ_ENTRY]
+			+ 4 * i);
+	}
+	mb(); /* Ensure we flush */
+}
+
+void dump_regs(struct msm_spm_driver_data *dev, int cpu)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS);
+	mb(); /* Ensure we flush */
+	pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_STS: 0x%x\n", cpu,
+			dev->reg_shadow[MSM_SPM_REG_SAW_SPM_STS]);
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+	mb(); /* Ensure we flush */
+	pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_CTL: 0x%x\n", cpu,
+			dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL]);
+}
+
+int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
+		uint8_t *cmd, uint32_t *offset)
+{
+	uint32_t cmd_w;
+	uint32_t offset_w = *offset / 4;
+	uint8_t last_cmd;
+
+	if (!cmd)
+		return -EINVAL;
+
+	while (1) {
+		int i;
+
+		cmd_w = 0;
+		last_cmd = 0;
+		cmd_w = dev->reg_seq_entry_shadow[offset_w];
+
+		for (i = (*offset % 4); i < 4; i++) {
+			last_cmd = *(cmd++);
+			cmd_w |=  last_cmd << (i * 8);
+			(*offset)++;
+			if (last_cmd == 0x0f)
+				break;
+		}
+
+		dev->reg_seq_entry_shadow[offset_w++] = cmd_w;
+		if (last_cmd == 0x0f)
+			break;
+	}
+
+	return 0;
+}
+
+int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
+		uint32_t ctl)
+{
+
+	/* SPM is configured to reset start address to zero after end of Program
+	 */
+	if (!dev)
+		return -EINVAL;
+
+	msm_spm_drv_set_start_addr(dev, ctl);
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+	wmb(); /* Ensure we flush */
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) {
+		int i;
+
+		for (i = 0; i < MSM_SPM_REG_NR; i++)
+			pr_info("%s: reg %02x = 0x%08x\n", __func__,
+				dev->reg_offsets[i], dev->reg_shadow[i]);
+	}
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS);
+
+	return 0;
+}
+
+uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0xFF;
+}
+
+#ifdef CONFIG_MSM_AVS_HW
+static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & BIT(0);
+}
+
+static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~BIT(27);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev)
+{
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= BIT(27);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
+		unsigned int vlevel)
+{
+	vlevel &= 0x3f;
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x7efc00;
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= ((vlevel - 4) << 10);
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= (vlevel << 17);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+#else
+static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
+{
+	return false;
+}
+
+static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev) { }
+
+static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev) { }
+
+static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
+		unsigned int vlevel)
+{
+}
+#endif
+
+static inline int msm_spm_drv_validate_data(struct msm_spm_driver_data *dev,
+					unsigned int vlevel, int vctl_port)
+{
+	int timeout_us = dev->vctl_timeout_us;
+	uint32_t new_level;
+
+	/* Confirm the voltage we set was what hardware sent and
+	 * FSM is idle.
+	 */
+	do {
+		udelay(1);
+		new_level = msm_spm_drv_get_sts_curr_pmic_data(dev);
+
+		/**
+		 * VCTL_PORT has to be 0, for vlevel to be updated.
+		 * If port is not 0, check for PMIC_STATE only.
+		 */
+
+		if (((new_level & 0x30000) == MSM_SPM_PMIC_STATE_IDLE) &&
+				(vctl_port || ((new_level & 0xFF) == vlevel)))
+			break;
+	} while (--timeout_us);
+
+	if (!timeout_us) {
+		pr_err("Wrong level %#x\n", new_level);
+		return -EIO;
+	}
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
+		pr_info("%s: done, remaining timeout %u us\n",
+			__func__, timeout_us);
+
+	return 0;
+}
+
+int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel)
+{
+	uint32_t vlevel_set = vlevel;
+	bool avs_enabled;
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	avs_enabled  = msm_spm_drv_is_avs_enabled(dev);
+
+	if (!msm_spm_pmic_arb_present(dev))
+		return -ENODEV;
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
+		pr_info("%s: requesting vlevel %#x\n", __func__, vlevel);
+
+	if (avs_enabled)
+		msm_spm_drv_disable_avs(dev);
+
+	if (dev->vctl_port_ub >= 0) {
+		/**
+		 * VCTL can send 8bit voltage level at once.
+		 * Send lower 8bit first, vlevel change happens
+		 * when upper 8bit is sent.
+		 */
+		vlevel = vlevel_set & 0xFF;
+	}
+
+	/* Kick the state machine back to idle */
+	dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1;
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST);
+
+	msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port);
+
+	ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port);
+	if (ret)
+		goto set_vdd_bail;
+
+	if (dev->vctl_port_ub >= 0) {
+		/* Send upper 8bit of voltage level */
+		vlevel = (vlevel_set >> 8) & 0xFF;
+
+		/* Kick the state machine back to idle */
+		dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST);
+
+		/*
+		 * Steps for sending for vctl port other than '0'
+		 * Write VCTL register with pmic data and address index
+		 * Perform system barrier
+		 * Wait for 1us
+		 * Read PMIC_STS register to make sure operation is complete
+		 */
+		msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port_ub);
+
+		mb(); /* To make sure data is sent before checking status */
+
+		ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port_ub);
+		if (ret)
+			goto set_vdd_bail;
+	}
+
+	/* Set AVS min/max */
+	if (avs_enabled) {
+		msm_spm_drv_set_avs_vlevel(dev, vlevel_set);
+		msm_spm_drv_enable_avs(dev);
+	}
+
+	return ret;
+
+set_vdd_bail:
+	if (avs_enabled)
+		msm_spm_drv_enable_avs(dev);
+
+	pr_err("%s: failed %#x vlevel setting in timeout %uus\n",
+			__func__, vlevel_set, dev->vctl_timeout_us);
+	return -EIO;
+}
+
+static int msm_spm_drv_get_pmic_port(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port)
+{
+	int index = -1;
+
+	switch (port) {
+	case MSM_SPM_PMIC_VCTL_PORT:
+		index = dev->vctl_port;
+		break;
+	case MSM_SPM_PMIC_PHASE_PORT:
+		index = dev->phase_port;
+		break;
+	case MSM_SPM_PMIC_PFM_PORT:
+		index = dev->pfm_port;
+		break;
+	default:
+		break;
+	}
+
+	return index;
+}
+
+int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port, unsigned int data)
+{
+	unsigned int pmic_data = 0;
+	unsigned int timeout_us = 0;
+	int index = 0;
+
+	if (!msm_spm_pmic_arb_present(dev))
+		return -ENODEV;
+
+	index = msm_spm_drv_get_pmic_port(dev, port);
+	if (index < 0)
+		return -ENODEV;
+
+	pmic_data |= data & 0xFF;
+	pmic_data |= (index & 0x7) << 16;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data;
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);
+	mb(); /* Ensure we flush */
+
+	timeout_us = dev->vctl_timeout_us;
+	/**
+	 * Confirm the pmic data set was what hardware sent by
+	 * checking the PMIC FSM state.
+	 * We cannot use the sts_pmic_data and check it against
+	 * the value like we do for set_vdd, since the PMIC_STS
+	 * is only updated for SAW_VCTL sent with port index 0.
+	 */
+	do {
+		if (msm_spm_drv_get_sts_pmic_state(dev) ==
+				MSM_SPM_PMIC_STATE_IDLE)
+			break;
+		udelay(1);
+	} while (--timeout_us);
+
+	if (!timeout_us) {
+		pr_err("%s: failed, remaining timeout %u us, data %d\n",
+				__func__, timeout_us, data);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq_write)
+{
+	int i;
+
+	if (seq_write)
+		msm_spm_drv_flush_seq_entry(dev);
+
+	for (i = 0; i < MSM_SPM_REG_SAW_PMIC_DATA_0 + num_pmic_data; i++)
+		msm_spm_drv_load_shadow(dev, i);
+
+	for (i = MSM_SPM_REG_NR_INITIALIZE + 1; i < MSM_SPM_REG_NR; i++)
+		msm_spm_drv_load_shadow(dev, i);
+}
+
+int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data)
+{
+	int i;
+	bool found = false;
+
+	dev->ver_reg = data->ver_reg;
+	dev->reg_base_addr = data->reg_base_addr;
+	msm_spm_drv_get_saw2_ver(dev, &dev->major, &dev->minor);
+	for (i = 0; i < ARRAY_SIZE(saw2_info); i++)
+		if (dev->major == saw2_info[i].major &&
+			dev->minor == saw2_info[i].minor) {
+			pr_debug("%s: Version found\n",
+					saw2_info[i].ver_name);
+			dev->reg_offsets = saw2_info[i].spm_reg_offset_ptr;
+			found = true;
+			break;
+		}
+
+	if (!found) {
+		pr_err("%s: No SAW version found\n", __func__);
+		WARN_ON(!found);
+	}
+	return 0;
+}
+
+void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id,
+		int val)
+{
+	dev->reg_shadow[id] = val;
+	msm_spm_drv_flush_shadow(dev, id);
+	/* Complete the above writes before other accesses */
+	mb();
+}
+
+int msm_spm_drv_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data)
+{
+	int num_spm_entry;
+
+	if (!dev || !data)
+		return -ENODEV;
+
+	dev->vctl_port = data->vctl_port;
+	dev->vctl_port_ub = data->vctl_port_ub;
+	dev->phase_port = data->phase_port;
+	dev->pfm_port = data->pfm_port;
+	dev->reg_base_addr = data->reg_base_addr;
+	memcpy(dev->reg_shadow, data->reg_init_values,
+			sizeof(data->reg_init_values));
+
+	dev->vctl_timeout_us = data->vctl_timeout_us;
+
+
+	if (!num_pmic_data)
+		num_pmic_data = msm_spm_drv_get_num_pmic_data(dev);
+
+	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);
+
+	dev->reg_seq_entry_shadow =
+		kcalloc(num_spm_entry, sizeof(*dev->reg_seq_entry_shadow),
+				GFP_KERNEL);
+
+	if (!dev->reg_seq_entry_shadow)
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 33b4fbd..2233010 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -597,7 +597,6 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
 {
 	int ret = 0;
 	struct spcom_user_create_channel_command *cmd = cmd_buf;
-	const size_t maxlen = sizeof(cmd->ch_name);
 
 	if (cmd_size != sizeof(*cmd)) {
 		spcom_pr_err("cmd_size [%d] , expected [%d]\n",
@@ -605,11 +604,6 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
 		return -EINVAL;
 	}
 
-	if (strnlen(cmd->ch_name, maxlen) == maxlen) {
-		spcom_pr_err("channel name is not NULL terminated\n");
-		return -EINVAL;
-	}
-
 	ret = spcom_create_channel_chardev(cmd->ch_name, cmd->is_sharable);
 
 	return ret;
@@ -2002,6 +1996,12 @@ static int spcom_create_channel_chardev(const char *name, bool is_sharable)
 	void *priv;
 	struct cdev *cdev;
 
+	if (!name || strnlen(name, SPCOM_CHANNEL_NAME_SIZE) ==
+			SPCOM_CHANNEL_NAME_SIZE) {
+		spcom_pr_err("invalid channel name\n");
+		return -EINVAL;
+	}
+
 	spcom_pr_dbg("creating channel [%s]\n", name);
 	mutex_lock(&spcom_dev->create_channel_lock);
 
@@ -2040,7 +2040,12 @@ static int spcom_create_channel_chardev(const char *name, bool is_sharable)
 
 	devt = spcom_dev->device_no + atomic_read(&spcom_dev->chdev_count);
 	priv = ch;
-	dev = device_create(cls, parent, devt, priv, name);
+
+	/*
+	 * Pass channel name as formatted string to avoid abuse by using a
+	 * formatted string as channel name
+	 */
+	dev = device_create(cls, parent, devt, priv, "%s", name);
 	if (IS_ERR(dev)) {
 		spcom_pr_err("device_create failed\n");
 		ret = -ENODEV;
diff --git a/drivers/soc/qcom/spm_devices.c b/drivers/soc/qcom/spm_devices.c
new file mode 100644
index 0000000..b1f15f2
--- /dev/null
+++ b/drivers/soc/qcom/spm_devices.c
@@ -0,0 +1,1011 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2011-2020  The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <soc/qcom/spm.h>
+#include "spm_driver.h"
+
+#define VDD_DEFAULT 0xDEADF00D
+#define SLP_CMD_BIT 17
+#define PC_MODE_BIT 16
+#define RET_MODE_BIT 15
+#define EVENT_SYNC_BIT 24
+#define ISAR_BIT 3
+#define SPM_EN_BIT 0
+
+/* One configured low-power mode: its identifier and the value programmed
+ * into the SPM CTL register to start its command sequence.
+ */
+struct msm_spm_power_modes {
+	uint32_t mode;
+	uint32_t ctl;
+};
+
+/* State for one SPM/SAW instance (per-CPU, or a standalone instance
+ * allocated in msm_spm_get_device()).
+ */
+struct msm_spm_device {
+	struct list_head list;			/* entry in global spm_list */
+	bool initialized;			/* msm_spm_dev_init() succeeded */
+	const char *name;			/* DT "qcom,name" property */
+	struct msm_spm_driver_data reg_data;	/* low-level register state */
+	struct msm_spm_power_modes *modes;	/* configured mode table */
+	uint32_t num_modes;
+	uint32_t cpu_vdd;			/* last level set via SPM, or VDD_DEFAULT */
+	struct cpumask mask;			/* CPUs from "qcom,cpu-vctl-list" */
+	void __iomem *q2s_reg;			/* optional Q2S control register */
+	bool qchannel_ignore;
+	bool allow_rpm_hs;			/* DT "qcom,supports-rpm-hs" */
+	bool use_spm_clk_gating;
+	bool use_qchannel_for_wfi;
+	void __iomem *flush_base_addr;		/* optional "hw-flush" register */
+	void __iomem *slpreq_base_addr;		/* optional "slpreq" register */
+};
+
+/* Argument bundle for the cross-CPU set-vdd call. */
+struct msm_spm_vdd_info {
+	struct msm_spm_device *vctl_dev;	/* SAW that owns the rail */
+	uint32_t vlevel;			/* encoded PMIC value to program */
+	int err;				/* result of msm_spm_drv_set_vdd() */
+};
+
+static LIST_HEAD(spm_list);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
+static DEFINE_PER_CPU(struct msm_spm_device *, cpu_vctl_device);
+
+/* Cross-CPU callback: apply the requested voltage level on the SAW that
+ * owns the calling CPU and record the driver's result for the requester.
+ */
+static void msm_spm_smp_set_vdd(void *data)
+{
+	struct msm_spm_vdd_info *req = data;
+
+	req->vctl_dev->cpu_vdd = req->vlevel;
+	req->err = msm_spm_drv_set_vdd(&req->vctl_dev->reg_data,
+					req->vlevel);
+}
+
+/**
+ * msm_spm_probe_done(): Verify and return the status of the cpu(s) and l2
+ * probe.
+ * Return: 0 if all spm devices have been probed, else return -EPROBE_DEFER.
+ * if probe failed, then return the err number for that failure.
+ */
+int msm_spm_probe_done(void)
+{
+	struct msm_spm_device *dev;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		dev = per_cpu(cpu_vctl_device, cpu);
+		if (!dev)
+			return -EPROBE_DEFER;
+
+		/* A failed probe stores ERR_PTR(err) here; return that err
+		 * as documented, not the bare 1 that IS_ERR() evaluates to.
+		 */
+		if (IS_ERR(dev))
+			return PTR_ERR(dev);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_spm_probe_done);
+
+/* Dump the SAW register contents of @cpu's SPM instance. */
+void msm_spm_dump_regs(unsigned int cpu)
+{
+	struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
+
+	dump_regs(&dev->reg_data, cpu);
+}
+
+/**
+ * msm_spm_set_vdd(): Set core voltage
+ * @cpu: core id
+ * @vlevel: Encoded PMIC data.
+ *
+ * Return: 0 on success or -(ERRNO) on failure.
+ */
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+	struct msm_spm_vdd_info info;
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+	int ret;
+
+	if (!dev)
+		return -EPROBE_DEFER;
+
+	/* A failed probe stores ERR_PTR(err) in cpu_vctl_device;
+	 * propagate the real errno instead of IS_ERR()'s bare 1.
+	 */
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	info.vctl_dev = dev;
+	info.vlevel = vlevel;
+
+	/* Run the write on a CPU the target SAW actually controls */
+	ret = smp_call_function_any(&dev->mask, msm_spm_smp_set_vdd, &info,
+					true);
+	if (ret)
+		return ret;
+
+	return info.err;
+}
+EXPORT_SYMBOL(msm_spm_set_vdd);
+
+/**
+ * msm_spm_get_vdd(): Get core voltage
+ * @cpu: core id
+ * @return: encoded PMIC data on success, -EPROBE_DEFER if not yet probed,
+ * or a negative errno on probe failure / unreadable voltage.
+ */
+int msm_spm_get_vdd(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -EPROBE_DEFER;
+
+	/* A failed probe stores ERR_PTR(err) here (see the probe fail
+	 * path); don't dereference it.
+	 */
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	return msm_spm_drv_get_vdd(&dev->reg_data) ? : -EINVAL;
+}
+EXPORT_SYMBOL(msm_spm_get_vdd);
+
+/*
+ * Program the Q2S (QChannel-to-SPM) register for the requested mode:
+ * bit[1] = qchannel_ignore, bit[2] = spm_legacy_mode.
+ * No-op when this instance has no Q2S register mapped.
+ */
+static void msm_spm_config_q2s(struct msm_spm_device *dev, unsigned int mode)
+{
+	uint32_t spm_legacy_mode = 0;
+	uint32_t qchannel_ignore = 0;
+	uint32_t val = 0;
+
+	if (!dev->q2s_reg)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_DISABLED:
+	case MSM_SPM_MODE_CLOCK_GATING:
+		qchannel_ignore = !dev->use_qchannel_for_wfi;
+		spm_legacy_mode = 0;
+		break;
+	case MSM_SPM_MODE_RETENTION:
+		qchannel_ignore = 0;
+		spm_legacy_mode = 0;
+		break;
+	case MSM_SPM_MODE_GDHS:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		qchannel_ignore = dev->qchannel_ignore;
+		spm_legacy_mode = 1;
+		break;
+	default:
+		break;
+	}
+
+	val = spm_legacy_mode << 2 | qchannel_ignore << 1;
+	__raw_writel(val, dev->q2s_reg);
+	mb(); /* Ensure flush */
+}
+
+/*
+ * Assert (BIT(0)) the HW flush request for modes that power down the
+ * core, and clear it for all others. No-op when no "hw-flush" register
+ * was mapped at probe.
+ */
+static void msm_spm_config_hw_flush(struct msm_spm_device *dev,
+		unsigned int mode)
+{
+	uint32_t val = 0;
+
+	if (!dev->flush_base_addr)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_FASTPC:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		val = BIT(0);
+		break;
+	default:
+		break;
+	}
+
+	__raw_writel(val, dev->flush_base_addr);
+}
+
+/*
+ * Read-modify-write BIT(4) of the sleep-request register: set it for
+ * power-down modes, clear it otherwise, preserving all other bits.
+ * No-op when no "slpreq" register was mapped at probe.
+ */
+static void msm_spm_config_slpreq(struct msm_spm_device *dev,
+		unsigned int mode)
+{
+	uint32_t val = 0;
+
+	if (!dev->slpreq_base_addr)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_FASTPC:
+	case MSM_SPM_MODE_GDHS:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		val = BIT(4);
+		break;
+	default:
+		break;
+	}
+
+	val = (__raw_readl(dev->slpreq_base_addr) & ~BIT(4)) | val;
+	__raw_writel(val, dev->slpreq_base_addr);
+}
+
+/*
+ * Core helper behind all mode-change entry points: pick the CTL word for
+ * @mode from the device's mode table, program the SPM, then bring the
+ * Q2S / HW-flush / sleep-request side registers in line with the mode.
+ *
+ * NOTE(review): if @mode is not in dev->modes[] (and is not
+ * MSM_SPM_MODE_DISABLED), ctl stays 0 and is still written to the
+ * hardware - confirm callers only request configured modes.
+ */
+static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm, bool set_spm_enable)
+{
+	uint32_t i;
+	int ret = -EINVAL;
+	uint32_t ctl = 0;
+
+	if (!dev) {
+		pr_err("dev is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!dev->initialized)
+		return -ENXIO;
+
+	/* Nothing to program when no modes were configured */
+	if (!dev->num_modes)
+		return 0;
+
+	if (mode == MSM_SPM_MODE_DISABLED && set_spm_enable) {
+		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
+	} else {
+		if (set_spm_enable)
+			ret = msm_spm_drv_set_spm_enable(&dev->reg_data, true);
+		for (i = 0; i < dev->num_modes; i++) {
+			if (dev->modes[i].mode != mode)
+				continue;
+
+			ctl = dev->modes[i].ctl;
+			/* Strip the RPM sleep command when handshake is
+			 * not allowed for this device.
+			 */
+			if (!dev->allow_rpm_hs && notify_rpm)
+				ctl &= ~BIT(SLP_CMD_BIT);
+
+			break;
+		}
+		ret = msm_spm_drv_set_low_power_mode(&dev->reg_data, ctl);
+	}
+
+	msm_spm_config_q2s(dev, mode);
+	msm_spm_config_hw_flush(dev, mode);
+	msm_spm_config_slpreq(dev, mode);
+
+	return ret;
+}
+
+/*
+ * Initialize one SPM instance from platform data: allocate the mode
+ * table, flush each mode's command sequence into the SPM and record its
+ * CTL word (control bits plus sequence start offset).
+ *
+ * On failure the mode table is released and num_modes reset: the device
+ * object stays registered even when this fails (probe continues), so a
+ * dangling modes pointer would be walked by msm_spm_is_mode_avail().
+ */
+static int msm_spm_dev_init(struct msm_spm_device *dev,
+		struct msm_spm_platform_data *data)
+{
+	int i, ret = -ENOMEM;
+	uint32_t offset = 0;
+
+	dev->cpu_vdd = VDD_DEFAULT;
+	dev->num_modes = data->num_modes;
+	dev->modes = kmalloc_array(
+			dev->num_modes, sizeof(struct msm_spm_power_modes),
+			GFP_KERNEL);
+
+	if (!dev->modes)
+		goto spm_failed_malloc;
+
+	ret = msm_spm_drv_init(&dev->reg_data, data);
+
+	if (ret)
+		goto spm_failed_init;
+
+	for (i = 0; i < dev->num_modes; i++) {
+
+		/* Default offset is 0 and gets updated as we write more
+		 * sequences into SPM
+		 */
+		dev->modes[i].ctl = data->modes[i].ctl | ((offset & 0x1FF)
+						<< 4);
+		ret = msm_spm_drv_write_seq_data(&dev->reg_data,
+						data->modes[i].cmd, &offset);
+		if (ret < 0)
+			goto spm_failed_init;
+
+		dev->modes[i].mode = data->modes[i].mode;
+	}
+
+	msm_spm_drv_reinit(&dev->reg_data, dev->num_modes ? true : false);
+
+	dev->initialized = true;
+
+	return 0;
+
+spm_failed_init:
+	kfree(dev->modes);
+	dev->modes = NULL;
+spm_failed_malloc:
+	/* Keep the still-registered device in a consistent empty state */
+	dev->num_modes = 0;
+	return ret;
+}
+
+/**
+ * msm_spm_turn_on_cpu_rail(): Power on cpu rail before turning on core
+ * @vctl_node: The SPM node that controls the voltage for the CPU
+ * @val: The value to be set on the rail
+ * @cpu: The cpu for which the rail is being powered on
+ * @vctl_offset: Offset of the voltage-control register within region 0
+ */
+int msm_spm_turn_on_cpu_rail(struct device_node *vctl_node,
+		unsigned int val, int cpu, int vctl_offset)
+{
+	uint32_t timeout = 2000; /* delay for voltage to settle on the core */
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+	void __iomem *base;
+
+	base = of_iomap(vctl_node, 1);
+	if (base) {
+		/*
+		 * Program Q2S to disable SPM legacy mode and ignore Q2S
+		 * channel requests.
+		 * bit[1] = qchannel_ignore = 1
+		 * bit[2] = spm_legacy_mode = 0
+		 */
+		writel_relaxed(0x2, base);
+		mb(); /* Ensure flush */
+		iounmap(base);
+	}
+
+	/*
+	 * Nothing to do when SPM already manages this CPU's rail. Checked
+	 * before mapping region 0 so the early return no longer leaks the
+	 * mapping (the original returned without iounmap()).
+	 */
+	if (dev && (dev->cpu_vdd != VDD_DEFAULT))
+		return 0;
+
+	base = of_iomap(vctl_node, 0);
+	if (!base)
+		return -ENOMEM;
+
+	/* Set the CPU supply regulator voltage */
+	val = (val & 0xFF);
+	writel_relaxed(val, base + vctl_offset);
+	mb(); /* Ensure flush */
+	udelay(timeout);
+
+	/* Enable the CPU supply regulator */
+	val = 0x30080;
+	writel_relaxed(val, base + vctl_offset);
+	mb(); /* Ensure flush */
+	udelay(timeout);
+
+	iounmap(base);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);
+
+/* Reprogram every per-CPU SAW from its shadow registers and sequences. */
+void msm_spm_reinit(void)
+{
+	unsigned int cpu;
+	struct msm_spm_device *dev;
+
+	for_each_possible_cpu(cpu) {
+		dev = &per_cpu(msm_cpu_spm_device, cpu);
+		msm_spm_drv_reinit(&dev->reg_data, true);
+	}
+}
+EXPORT_SYMBOL(msm_spm_reinit);
+
+/*
+ * msm_spm_is_mode_avail() - Specifies if a mode is available for the cpu
+ * It should only be used to decide a mode before lpm driver is probed.
+ * @mode: SPM LPM mode to be selected
+ */
+bool msm_spm_is_mode_avail(unsigned int mode)
+{
+	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+	uint32_t idx = 0;
+
+	while (idx < dev->num_modes) {
+		if (dev->modes[idx].mode == mode)
+			return true;
+		idx++;
+	}
+
+	return false;
+}
+
+/**
+ * msm_spm_is_avs_enabled() - Functions returns 1 if AVS is enabled and
+ *			      0 if it is not.
+ * @cpu: specifies cpu's avs should be read
+ *
+ * Returns errno in case of failure or AVS enable state otherwise
+ */
+int msm_spm_is_avs_enabled(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* cpu_vctl_device holds ERR_PTR() after a failed probe; treat it
+	 * like "no device" instead of dereferencing it.
+	 */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_get_avs_enable(&dev->reg_data);
+}
+EXPORT_SYMBOL(msm_spm_is_avs_enabled);
+
+/**
+ * msm_spm_avs_enable() - Enables AVS on the SAW that controls this cpu's
+ *			  voltage.
+ * @cpu: specifies which cpu's avs should be enabled
+ *
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_enable(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* May hold ERR_PTR() after a failed probe; don't dereference */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_enable(&dev->reg_data, true);
+}
+EXPORT_SYMBOL(msm_spm_avs_enable);
+
+/**
+ * msm_spm_avs_disable() - Disables AVS on the SAW that controls this cpu's
+ *			   voltage.
+ * @cpu: specifies which cpu's avs should be disabled
+ *
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_disable(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* May hold ERR_PTR() after a failed probe; don't dereference */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_enable(&dev->reg_data, false);
+}
+EXPORT_SYMBOL(msm_spm_avs_disable);
+
+/**
+ * msm_spm_avs_set_limit() - Set maximum and minimum AVS limits on the
+ *			     SAW that controls this cpu's voltage.
+ * @cpu: specify which cpu's avs should be configured
+ * @min_lvl: specifies the minimum PMIC output voltage control register
+ *		value that may be sent to the PMIC
+ * @max_lvl: specifies the maximum PMIC output voltage control register
+ *		value that may be sent to the PMIC
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_set_limit(unsigned int cpu,
+		uint32_t min_lvl, uint32_t max_lvl)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* May hold ERR_PTR() after a failed probe; don't dereference */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_limit(&dev->reg_data, min_lvl, max_lvl);
+}
+EXPORT_SYMBOL(msm_spm_avs_set_limit);
+
+/**
+ * msm_spm_avs_enable_irq() - Enable an AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to enable
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* May hold ERR_PTR() after a failed probe; don't dereference */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, true);
+}
+EXPORT_SYMBOL(msm_spm_avs_enable_irq);
+
+/**
+ * msm_spm_avs_disable_irq() - Disable an AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to disable
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* May hold ERR_PTR() after a failed probe; don't dereference */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, false);
+}
+EXPORT_SYMBOL(msm_spm_avs_disable_irq);
+
+/**
+ * msm_spm_avs_clear_irq() - Clear a latched AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to clear
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* May hold ERR_PTR() after a failed probe; don't dereference */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_avs_clear_irq(&dev->reg_data, irq);
+}
+EXPORT_SYMBOL(msm_spm_avs_clear_irq);
+
+/**
+ * msm_spm_set_low_power_mode() - Configure SPM start address for low power
+ * mode on the calling CPU's SPM instance.
+ * @mode: SPM LPM mode to enter
+ * @notify_rpm: Notify RPM in this mode
+ */
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+	return msm_spm_dev_set_low_power_mode(
+			this_cpu_ptr(&msm_cpu_spm_device), mode,
+			notify_rpm, true);
+}
+EXPORT_SYMBOL(msm_spm_set_low_power_mode);
+
+/* Record whether the RPM sleep handshake is allowed on this CPU's SPM. */
+void msm_spm_set_rpm_hs(bool allow_rpm_hs)
+{
+	this_cpu_ptr(&msm_cpu_spm_device)->allow_rpm_hs = allow_rpm_hs;
+}
+EXPORT_SYMBOL(msm_spm_set_rpm_hs);
+
+/* Program @dev for @mode without touching the SPM enable bit. */
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, false);
+}
+
+/**
+ * msm_spm_init(): Board initialization function
+ * @data: platform specific SPM register configuration data
+ * @nr_devs: Number of SPM devices being initialized
+ */
+int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
+{
+	unsigned int cpu;
+	int rc = 0;
+
+	if (!data || (nr_devs < num_possible_cpus()))
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		rc = msm_spm_dev_init(&per_cpu(msm_cpu_spm_device, cpu),
+				&data[cpu]);
+		if (rc < 0) {
+			pr_warn("%s():failed CPU:%u ret:%d\n", __func__,
+					cpu, rc);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * msm_spm_get_device_by_name(): Look up a registered SPM device by its
+ * "qcom,name" label.
+ * @name: name to match
+ *
+ * Returns the device, or ERR_PTR(-ENODEV) when nothing matches.
+ */
+struct msm_spm_device *msm_spm_get_device_by_name(const char *name)
+{
+	struct msm_spm_device *dev;
+
+	/* Idiomatic form of the open-coded list_for_each + list_entry */
+	list_for_each_entry(dev, &spm_list, list) {
+		if (dev->name && !strcmp(dev->name, name))
+			return dev;
+	}
+	return ERR_PTR(-ENODEV);
+}
+
+/* Program @dev for @mode, also updating the SPM enable bit. */
+int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
+}
+#ifdef CONFIG_MSM_L2_SPM
+
+/**
+ * msm_spm_apcs_set_phase(): Set number of SMPS phases.
+ * @cpu: cpu which is requesting the change in number of phases.
+ * @phase_cnt: Number of phases to be set active
+ */
+int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* May hold ERR_PTR() after a failed probe; don't dereference */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_set_pmic_data(&dev->reg_data,
+			MSM_SPM_PMIC_PHASE_PORT, phase_cnt);
+}
+EXPORT_SYMBOL(msm_spm_apcs_set_phase);
+
+/** msm_spm_enable_fts_lpm() : Enable FTS to switch to low power
+ *                             when the cores are in low power modes
+ * @cpu: cpu that is entering low power mode.
+ * @mode: The mode configuration for FTS
+ */
+int msm_spm_enable_fts_lpm(int cpu, uint32_t mode)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	/* May hold ERR_PTR() after a failed probe; don't dereference */
+	if (IS_ERR_OR_NULL(dev))
+		return -ENXIO;
+
+	return msm_spm_drv_set_pmic_data(&dev->reg_data,
+			MSM_SPM_PMIC_PFM_PORT, mode);
+}
+EXPORT_SYMBOL(msm_spm_enable_fts_lpm);
+
+#endif
+
+/*
+ * Map an SPM DT node to a CPU number via its "qcom,cpu" phandle.
+ * Returns the CPU number, num_possible_cpus() when the property is
+ * absent (a non-CPU SAW), or -EINVAL when the phandle matches no
+ * possible CPU.
+ *
+ * NOTE(review): of_get_cpu_node() references are still not dropped here,
+ * matching the rest of this file - confirm whether that leak matters.
+ */
+static int get_cpu_id(struct device_node *node)
+{
+	struct device_node *cpu_node;
+	u32 cpu;
+	char *key = "qcom,cpu";
+
+	cpu_node = of_parse_phandle(node, key, 0);
+	if (!cpu_node)
+		return num_possible_cpus();
+
+	for_each_possible_cpu(cpu) {
+		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+			/* Drop the of_parse_phandle() reference (leaked
+			 * in the original).
+			 */
+			of_node_put(cpu_node);
+			return cpu;
+		}
+	}
+	of_node_put(cpu_node);
+
+	return -EINVAL;
+}
+
+/*
+ * Resolve the msm_spm_device for @pdev: the per-CPU instance when
+ * "qcom,cpu" names a possible CPU, or a freshly allocated instance when
+ * the property is absent (presumably a non-CPU SAW - confirm).
+ * Returns NULL for partial-goods CPUs or when the required "qcom,name"
+ * property is missing; the device is added to spm_list on success.
+ */
+static struct msm_spm_device *msm_spm_get_device(struct platform_device *pdev)
+{
+	struct msm_spm_device *dev = NULL;
+	const char *val = NULL;
+	char *key = "qcom,name";
+	int cpu = get_cpu_id(pdev->dev.of_node);
+
+	if ((cpu >= 0) && cpu < num_possible_cpus())
+		dev = &per_cpu(msm_cpu_spm_device, cpu);
+	else if (cpu == num_possible_cpus())
+		dev = devm_kzalloc(&pdev->dev, sizeof(struct msm_spm_device),
+					GFP_KERNEL);
+
+	if (!dev)
+		return NULL;
+
+	if (of_property_read_string(pdev->dev.of_node, key, &val)) {
+		pr_err("%s(): Cannot find a required node key:%s\n",
+				__func__, key);
+		return NULL;
+	}
+	dev->name = val;
+	list_add(&dev->list, &spm_list);
+
+	return dev;
+}
+
+/*
+ * Fill @mask with every CPU listed in the node's "qcom,cpu-vctl-list"
+ * phandle array (the CPUs whose voltage this SAW controls).
+ */
+static void get_cpumask(struct device_node *node, struct cpumask *mask)
+{
+	unsigned int c;
+	int idx = 0;
+	struct device_node *cpu_node;
+	char *key = "qcom,cpu-vctl-list";
+
+	cpu_node = of_parse_phandle(node, key, idx++);
+	while (cpu_node) {
+		for_each_possible_cpu(c) {
+			if (of_get_cpu_node(c, NULL) == cpu_node)
+				cpumask_set_cpu(c, mask);
+		}
+		/* Drop the of_parse_phandle() reference (leaked before) */
+		of_node_put(cpu_node);
+		cpu_node = of_parse_phandle(node, key, idx++);
+	}
+}
+
+/*
+ * Probe one SPM/SAW instance: map its registers, load the shadow values
+ * and LPM command sequences from DT, and publish the device for the CPUs
+ * it controls. Returning 0 without drvdata is deliberate for
+ * partial-goods CPUs that are not present on this part.
+ */
+static int msm_spm_dev_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int cpu = 0;
+	int i = 0;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *n = NULL;
+	struct msm_spm_platform_data spm_data;
+	char *key = NULL;
+	uint32_t val = 0;
+	struct msm_spm_seq_entry modes[MSM_SPM_MODE_NR];
+	int len = 0;
+	struct msm_spm_device *dev = NULL;
+	struct resource *res = NULL;
+	uint32_t mode_count = 0;
+
+	struct spm_of {
+		char *key;
+		uint32_t id;
+	};
+
+	struct spm_of spm_of_data[] = {
+		{"qcom,saw2-cfg", MSM_SPM_REG_SAW_CFG},
+		{"qcom,saw2-avs-ctl", MSM_SPM_REG_SAW_AVS_CTL},
+		{"qcom,saw2-avs-hysteresis", MSM_SPM_REG_SAW_AVS_HYSTERESIS},
+		{"qcom,saw2-avs-limit", MSM_SPM_REG_SAW_AVS_LIMIT},
+		{"qcom,saw2-avs-dly", MSM_SPM_REG_SAW_AVS_DLY},
+		{"qcom,saw2-spm-dly", MSM_SPM_REG_SAW_SPM_DLY},
+		{"qcom,saw2-spm-ctl", MSM_SPM_REG_SAW_SPM_CTL},
+		{"qcom,saw2-pmic-data0", MSM_SPM_REG_SAW_PMIC_DATA_0},
+		{"qcom,saw2-pmic-data1", MSM_SPM_REG_SAW_PMIC_DATA_1},
+		{"qcom,saw2-pmic-data2", MSM_SPM_REG_SAW_PMIC_DATA_2},
+		{"qcom,saw2-pmic-data3", MSM_SPM_REG_SAW_PMIC_DATA_3},
+		{"qcom,saw2-pmic-data4", MSM_SPM_REG_SAW_PMIC_DATA_4},
+		{"qcom,saw2-pmic-data5", MSM_SPM_REG_SAW_PMIC_DATA_5},
+		{"qcom,saw2-pmic-data6", MSM_SPM_REG_SAW_PMIC_DATA_6},
+		{"qcom,saw2-pmic-data7", MSM_SPM_REG_SAW_PMIC_DATA_7},
+	};
+
+	struct mode_of {
+		char *key;
+		uint32_t id;
+	};
+
+	struct mode_of mode_of_data[] = {
+		{"qcom,saw2-spm-cmd-wfi", MSM_SPM_MODE_CLOCK_GATING},
+		{"qcom,saw2-spm-cmd-ret", MSM_SPM_MODE_RETENTION},
+		{"qcom,saw2-spm-cmd-gdhs", MSM_SPM_MODE_GDHS},
+		{"qcom,saw2-spm-cmd-spc",
+				MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE},
+		{"qcom,saw2-spm-cmd-pc", MSM_SPM_MODE_POWER_COLLAPSE},
+		{"qcom,saw2-spm-cmd-fpc", MSM_SPM_MODE_FASTPC},
+	};
+
+	dev = msm_spm_get_device(pdev);
+	if (!dev) {
+		/*
+		 * For partial goods support some CPUs might not be available
+		 * in which case, shouldn't throw an error
+		 */
+		return 0;
+	}
+	get_cpumask(node, &dev->mask);
+
+	memset(&spm_data, 0, sizeof(struct msm_spm_platform_data));
+	/*
+	 * Clear the whole modes[] array: each entry's ctl is built with |=
+	 * below, so an uncleared slot would feed stack garbage into the
+	 * SPM CTL value. (The original cleared only MSM_SPM_MODE_NR - 2
+	 * entries.)
+	 */
+	memset(modes, 0, sizeof(modes));
+
+	key = "qcom,saw2-ver-reg";
+	ret = of_property_read_u32(node, key, &val);
+	if (ret)
+		goto fail;
+	spm_data.ver_reg = val;
+
+	key = "qcom,vctl-timeout-us";
+	ret = of_property_read_u32(node, key, &val);
+	if (!ret)
+		spm_data.vctl_timeout_us = val;
+
+	/* SAW start address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	spm_data.reg_base_addr = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!spm_data.reg_base_addr) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* -1 marks "port not configured" for the optional PMIC ports */
+	spm_data.vctl_port = -1;
+	spm_data.vctl_port_ub = -1;
+	spm_data.phase_port = -1;
+	spm_data.pfm_port = -1;
+
+	key = "qcom,vctl-port";
+	of_property_read_u32(node, key, &spm_data.vctl_port);
+
+	key = "qcom,vctl-port-ub";
+	of_property_read_u32(node, key, &spm_data.vctl_port_ub);
+
+	key = "qcom,phase-port";
+	of_property_read_u32(node, key, &spm_data.phase_port);
+
+	key = "qcom,pfm-port";
+	of_property_read_u32(node, key, &spm_data.pfm_port);
+
+	/* Q2S (QChannel-2-SPM) register */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "q2s");
+	if (res) {
+		dev->q2s_reg = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+		if (!dev->q2s_reg) {
+			pr_err("%s(): Unable to iomap Q2S register\n",
+					__func__);
+			ret = -EADDRNOTAVAIL;
+			goto fail;
+		}
+	}
+
+	key = "qcom,use-qchannel-for-pc";
+	dev->qchannel_ignore = !of_property_read_bool(node, key);
+
+	key = "qcom,use-spm-clock-gating";
+	dev->use_spm_clk_gating = of_property_read_bool(node, key);
+
+	key = "qcom,use-qchannel-for-wfi";
+	dev->use_qchannel_for_wfi = of_property_read_bool(node, key);
+
+	/* HW flush address */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hw-flush");
+	if (res) {
+		dev->flush_base_addr = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(dev->flush_base_addr)) {
+			ret = PTR_ERR(dev->flush_base_addr);
+			pr_err("%s(): Unable to iomap hw flush register %d\n",
+					__func__, ret);
+			goto fail;
+		}
+	}
+
+	/* Sleep req address */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slpreq");
+	if (res) {
+		dev->slpreq_base_addr = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+		if (!dev->slpreq_base_addr) {
+			pr_err("%s(): Unable to iomap slpreq register\n",
+					__func__);
+			/* (dropped the dead ret = -ENOMEM that was
+			 * immediately overwritten here)
+			 */
+			ret = -EADDRNOTAVAIL;
+			goto fail;
+		}
+	}
+
+	/*
+	 * At system boot, cpus and or clusters can remain in reset. CCI SPM
+	 * will not be triggered unless SPM_LEGACY_MODE bit is set for the
+	 * cluster in reset. Initialize q2s registers and set the
+	 * SPM_LEGACY_MODE bit.
+	 */
+	msm_spm_config_q2s(dev, MSM_SPM_MODE_POWER_COLLAPSE);
+	msm_spm_drv_reg_init(&dev->reg_data, &spm_data);
+
+	for (i = 0; i < ARRAY_SIZE(spm_of_data); i++) {
+		ret = of_property_read_u32(node, spm_of_data[i].key, &val);
+		if (ret)
+			continue;
+		msm_spm_drv_upd_reg_shadow(&dev->reg_data, spm_of_data[i].id,
+				val);
+	}
+
+	for_each_child_of_node(node, n) {
+		const char *name;
+		bool bit_set;
+		int sync;
+
+		if (!n->name)
+			continue;
+
+		ret = of_property_read_string(n, "qcom,label", &name);
+		if (ret)
+			continue;
+
+		for (i = 0; i < ARRAY_SIZE(mode_of_data); i++)
+			if (!strcmp(name, mode_of_data[i].key))
+				break;
+
+		if (i == ARRAY_SIZE(mode_of_data)) {
+			pr_err("Mode name invalid %s\n", name);
+			break;
+		}
+
+		modes[mode_count].mode = mode_of_data[i].id;
+		modes[mode_count].cmd =
+			(uint8_t *)of_get_property(n, "qcom,sequence", &len);
+		if (!modes[mode_count].cmd) {
+			pr_err("cmd is empty\n");
+			continue;
+		}
+
+		bit_set = of_property_read_bool(n, "qcom,pc_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(PC_MODE_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,ret_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(RET_MODE_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,slp_cmd_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(SLP_CMD_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,isar");
+		modes[mode_count].ctl |= bit_set ? BIT(ISAR_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,spm_en");
+		modes[mode_count].ctl |= bit_set ? BIT(SPM_EN_BIT) : 0;
+
+		ret = of_property_read_u32(n, "qcom,event_sync", &sync);
+		if (!ret)
+			modes[mode_count].ctl |= sync << EVENT_SYNC_BIT;
+
+		mode_count++;
+	}
+
+	spm_data.modes = modes;
+	spm_data.num_modes = mode_count;
+
+	key = "qcom,supports-rpm-hs";
+	dev->allow_rpm_hs = of_property_read_bool(pdev->dev.of_node, key);
+
+	ret = msm_spm_dev_init(dev, &spm_data);
+	if (ret)
+		pr_err("SPM modes programming is not available from HLOS\n");
+
+	platform_set_drvdata(pdev, dev);
+
+	for_each_cpu(cpu, &dev->mask)
+		per_cpu(cpu_vctl_device, cpu) = dev;
+
+	if (!spm_data.num_modes)
+		return 0;
+
+	cpu = get_cpu_id(pdev->dev.of_node);
+
+	/* For CPUs that are online, the SPM has to be programmed for
+	 * clockgating mode to ensure that it can use SPM for entering these
+	 * low power modes.
+	 */
+	get_online_cpus();
+	if ((cpu >= 0) && (cpu < num_possible_cpus()) && (cpu_online(cpu)))
+		msm_spm_config_low_power_mode(dev, MSM_SPM_MODE_CLOCK_GATING,
+				false);
+	put_online_cpus();
+	return ret;
+
+fail:
+	cpu = get_cpu_id(pdev->dev.of_node);
+	/* Publish the error for non-CPU devices' dependents */
+	if (dev && (cpu >= num_possible_cpus() || (cpu < 0))) {
+		for_each_cpu(cpu, &dev->mask)
+			per_cpu(cpu_vctl_device, cpu) = ERR_PTR(ret);
+	}
+
+	pr_err("%s: CPU%d SPM device probe failed: %d\n", __func__, cpu, ret);
+
+	return ret;
+}
+
+static int msm_spm_dev_remove(struct platform_device *pdev)
+{
+	struct msm_spm_device *dev = platform_get_drvdata(pdev);
+
+	/* Probe can return 0 without setting drvdata (partial-goods CPUs);
+	 * guard against a NULL dereference in list_del().
+	 */
+	if (dev)
+		list_del(&dev->list);
+	return 0;
+}
+
+/* Device-tree nodes this driver binds to. */
+static const struct of_device_id msm_spm_match_table[] = {
+	{.compatible = "qcom,spm-v2"},
+	{},
+};
+
+/* Platform-driver glue for SPM v2 devices. */
+static struct platform_driver msm_spm_device_driver = {
+	.probe = msm_spm_dev_probe,
+	.remove = msm_spm_dev_remove,
+	.driver = {
+		.name = "spm-v2",
+		.of_match_table = msm_spm_match_table,
+	},
+};
+
+/**
+ * msm_spm_device_init(): Device tree initialization function
+ *
+ * Registers the spm-v2 platform driver. The static flag makes repeated
+ * calls (the initcall plus any explicit caller) a no-op.
+ */
+int __init msm_spm_device_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+	return platform_driver_register(&msm_spm_device_driver);
+}
+arch_initcall(msm_spm_device_init);
diff --git a/drivers/soc/qcom/spm_driver.h b/drivers/soc/qcom/spm_driver.h
new file mode 100644
index 0000000..f362ecd
--- /dev/null
+++ b/drivers/soc/qcom/spm_driver.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2011-2017, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
+#define __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
+
+#include <soc/qcom/spm.h>
+
+/*
+ * Indices into the SAW register shadow. Entries before
+ * MSM_SPM_REG_NR_INITIALIZE are configuration registers initialized from
+ * platform data; the remaining entries are presumably status /
+ * identification registers only loaded back into the shadow - confirm
+ * against the flush/load loops in spm_driver.c.
+ */
+enum {
+	MSM_SPM_REG_SAW_CFG,
+	MSM_SPM_REG_SAW_AVS_CTL,
+	MSM_SPM_REG_SAW_AVS_HYSTERESIS,
+	MSM_SPM_REG_SAW_SPM_CTL,
+	MSM_SPM_REG_SAW_PMIC_DLY,
+	MSM_SPM_REG_SAW_AVS_LIMIT,
+	MSM_SPM_REG_SAW_AVS_DLY,
+	MSM_SPM_REG_SAW_SPM_DLY,
+	MSM_SPM_REG_SAW_PMIC_DATA_0,
+	MSM_SPM_REG_SAW_PMIC_DATA_1,
+	MSM_SPM_REG_SAW_PMIC_DATA_2,
+	MSM_SPM_REG_SAW_PMIC_DATA_3,
+	MSM_SPM_REG_SAW_PMIC_DATA_4,
+	MSM_SPM_REG_SAW_PMIC_DATA_5,
+	MSM_SPM_REG_SAW_PMIC_DATA_6,
+	MSM_SPM_REG_SAW_PMIC_DATA_7,
+	MSM_SPM_REG_SAW_RST,
+
+	/* Number of registers initialized from platform data */
+	MSM_SPM_REG_NR_INITIALIZE = MSM_SPM_REG_SAW_RST,
+
+	MSM_SPM_REG_SAW_ID,
+	MSM_SPM_REG_SAW_SECURE,
+	MSM_SPM_REG_SAW_STS0,
+	MSM_SPM_REG_SAW_STS1,
+	MSM_SPM_REG_SAW_STS2,
+	MSM_SPM_REG_SAW_VCTL,
+	MSM_SPM_REG_SAW_SEQ_ENTRY,
+	MSM_SPM_REG_SAW_SPM_STS,
+	MSM_SPM_REG_SAW_AVS_STS,
+	MSM_SPM_REG_SAW_PMIC_STS,
+	MSM_SPM_REG_SAW_VERSION,
+
+	MSM_SPM_REG_NR,
+};
+
+/* One SPM command sequence as parsed from device tree. */
+struct msm_spm_seq_entry {
+	uint32_t mode;		/* MSM_SPM_MODE_* this sequence implements */
+	uint8_t *cmd;		/* raw bytes of the DT "qcom,sequence" property */
+	uint32_t ctl;		/* SPM_CTL bits assembled for this mode */
+};
+
+/* Per-instance configuration handed to msm_spm_drv_init()/reg_init(). */
+struct msm_spm_platform_data {
+	void __iomem *reg_base_addr;		/* mapped SAW register block */
+	uint32_t reg_init_values[MSM_SPM_REG_NR_INITIALIZE];
+
+	uint32_t ver_reg;			/* offset of the version register */
+	uint32_t vctl_port;			/* PMIC port indices; -1 = unused */
+	int vctl_port_ub;
+	uint32_t phase_port;
+	uint32_t pfm_port;
+
+	uint8_t awake_vlevel;
+	uint32_t vctl_timeout_us;		/* voltage-change poll timeout */
+	uint32_t avs_timeout_us;
+
+	uint32_t num_modes;
+	struct msm_spm_seq_entry *modes;	/* LPM sequences to program */
+};
+
+/* Logical PMIC data ports used with msm_spm_drv_set_pmic_data(). */
+enum msm_spm_pmic_port {
+	MSM_SPM_PMIC_VCTL_PORT,
+	MSM_SPM_PMIC_PHASE_PORT,
+	MSM_SPM_PMIC_PFM_PORT,
+};
+
+/* Low-level per-instance SAW driver state (owned by spm_driver.c). */
+struct msm_spm_driver_data {
+	uint32_t major;				/* detected SAW HW revision */
+	uint32_t minor;
+	uint32_t ver_reg;
+	uint32_t vctl_port;
+	int vctl_port_ub;
+	uint32_t phase_port;
+	uint32_t pfm_port;
+	void __iomem *reg_base_addr;
+	uint32_t vctl_timeout_us;
+	uint32_t avs_timeout_us;
+	uint32_t reg_shadow[MSM_SPM_REG_NR];	/* cached register values */
+	uint32_t *reg_seq_entry_shadow;		/* cached sequence memory */
+	uint32_t *reg_offsets;			/* per-revision offset table */
+};
+
+int msm_spm_drv_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data);
+int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data);
+void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq);
+int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
+		uint32_t ctl);
+int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev,
+		unsigned int vlevel);
+void dump_regs(struct msm_spm_driver_data *dev, int cpu);
+uint32_t msm_spm_drv_get_sts_curr_pmic_data(
+		struct msm_spm_driver_data *dev);
+int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
+		uint8_t *cmd, uint32_t *offset);
+void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev);
+int msm_spm_drv_set_spm_enable(struct msm_spm_driver_data *dev,
+		bool enable);
+int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port, unsigned int data);
+
+int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev,
+		 uint32_t min_lvl, uint32_t max_lvl);
+
+int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev,
+		 bool enable);
+int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev);
+
+int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq, bool enable);
+int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq);
+
+void msm_spm_reinit(void);
+int msm_spm_init(struct msm_spm_platform_data *data, int nr_devs);
+void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id,
+		int val);
+uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev);
+#endif
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8f7d544..3c2bd4c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -108,7 +108,7 @@
 obj-$(CONFIG_SPI_XLP)			+= spi-xlp.o
 obj-$(CONFIG_SPI_XTENSA_XTFPGA)		+= spi-xtensa-xtfpga.o
 obj-$(CONFIG_SPI_ZYNQMP_GQSPI)		+= spi-zynqmp-gqspi.o
-
+obj-$(CONFIG_SPI_QUP)			+= spi_qsd.o
 # SPI slave protocol handlers
 obj-$(CONFIG_SPI_SLAVE_TIME)		+= spi-slave-time.o
 obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL)	+= spi-slave-system-control.o
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
new file mode 100644
index 0000000..aa3e69c
--- /dev/null
+++ b/drivers/spi/spi_qsd.c
@@ -0,0 +1,2768 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2008-2018, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SPI driver for Qualcomm Technologies, Inc. MSM platforms
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/qcom-spi.h>
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "spi_qsd.h"
+
+#define SPI_MAX_BYTES_PER_WORD			(4)
+
+static int msm_spi_pm_resume_runtime(struct device *device);
+static int msm_spi_pm_suspend_runtime(struct device *device);
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
+static int get_local_resources(struct msm_spi *dd);
+static void put_local_resources(struct msm_spi *dd);
+
+/*
+ * msm_spi_configure_gsbi: switch the optional GSBI wrapper into SPI mode.
+ * The GSBI register block is the second MEM resource of the platform
+ * device; boards without a GSBI omit it, making this a no-op (returns 0).
+ * Returns -ENXIO if the region cannot be requested or mapped.
+ */
+static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
+					struct platform_device *pdev)
+{
+	struct resource *resource;
+	unsigned long   gsbi_mem_phys_addr;
+	size_t          gsbi_mem_size;
+	void __iomem    *gsbi_base;
+
+	resource  = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!resource)
+		return 0;
+
+	gsbi_mem_phys_addr = resource->start;
+	gsbi_mem_size = resource_size(resource);
+	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
+					gsbi_mem_size, SPI_DRV_NAME))
+		return -ENXIO;
+
+	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
+					gsbi_mem_size);
+	if (!gsbi_base)
+		return -ENXIO;
+
+	/* Set GSBI to SPI mode */
+	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
+
+	return 0;
+}
+
+/*
+ * msm_spi_register_init: soft-reset the QUP core and clear the SPI
+ * configuration registers so the controller starts from a known state.
+ */
+static inline void msm_spi_register_init(struct msm_spi *dd)
+{
+	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
+	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
+	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
+	/* The operational-mask register only exists on QUP v2 hardware */
+	if (dd->qup_ver)
+		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
+}
+
+/*
+ * msm_spi_pinctrl_init: look up the "default" (active) and "sleep" pinctrl
+ * states for the device. Returns 0 on success or a PTR_ERR value from the
+ * failing lookup.
+ */
+static int msm_spi_pinctrl_init(struct msm_spi *dd)
+{
+	dd->pinctrl = devm_pinctrl_get(dd->dev);
+	if (IS_ERR_OR_NULL(dd->pinctrl)) {
+		dev_err(dd->dev, "Failed to get pin ctrl\n");
+		return PTR_ERR(dd->pinctrl);
+	}
+	dd->pins_active = pinctrl_lookup_state(dd->pinctrl,
+				SPI_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(dd->pins_active)) {
+		dev_err(dd->dev, "Failed to lookup pinctrl default state\n");
+		return PTR_ERR(dd->pins_active);
+	}
+
+	dd->pins_sleep = pinctrl_lookup_state(dd->pinctrl,
+				SPI_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(dd->pins_sleep)) {
+		dev_err(dd->dev, "Failed to lookup pinctrl sleep state\n");
+		return PTR_ERR(dd->pins_sleep);
+	}
+
+	return 0;
+}
+
+/*
+ * msm_spi_request_gpios: claim the SPI bus lines. Legacy boards list raw
+ * GPIO numbers in spi_gpios[]; pinctrl-based boards just select the active
+ * state. On partial failure every GPIO claimed so far is released again.
+ */
+static inline int msm_spi_request_gpios(struct msm_spi *dd)
+{
+	int i = 0;
+	int result = 0;
+
+	if (!dd->pdata->use_pinctrl) {
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			if (dd->spi_gpios[i] >= 0) {
+				result = gpio_request(dd->spi_gpios[i],
+						spi_rsrcs[i]);
+				if (result) {
+					dev_err(dd->dev,
+					"error %d gpio_request for pin %d\n",
+					result, dd->spi_gpios[i]);
+					goto error;
+				}
+			}
+		}
+	} else {
+		result = pinctrl_select_state(dd->pinctrl, dd->pins_active);
+		if (result) {
+			dev_err(dd->dev, "%s: Can not set %s pins\n",
+			__func__, SPI_PINCTRL_STATE_DEFAULT);
+			goto error;
+		}
+	}
+	return 0;
+error:
+	/* unwind: free only the GPIOs requested before the failure */
+	if (!dd->pdata->use_pinctrl) {
+		for (; --i >= 0;) {
+			if (dd->spi_gpios[i] >= 0)
+				gpio_free(dd->spi_gpios[i]);
+		}
+	}
+	return result;
+}
+
+/*
+ * msm_spi_free_gpios: release the bus GPIOs and any claimed chip-select
+ * GPIOs (legacy boards), or select the pinctrl sleep state.
+ */
+static inline void msm_spi_free_gpios(struct msm_spi *dd)
+{
+	int i;
+	int result = 0;
+
+	if (!dd->pdata->use_pinctrl) {
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			if (dd->spi_gpios[i] >= 0)
+				gpio_free(dd->spi_gpios[i]);
+			}
+
+		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
+			if (dd->cs_gpios[i].valid) {
+				gpio_free(dd->cs_gpios[i].gpio_num);
+				dd->cs_gpios[i].valid = false;
+			}
+		}
+	} else {
+		result = pinctrl_select_state(dd->pinctrl, dd->pins_sleep);
+		if (result)
+			dev_err(dd->dev, "%s: Can not set %s pins\n",
+			__func__, SPI_PINCTRL_STATE_SLEEP);
+	}
+}
+
+/*
+ * msm_spi_request_cs_gpio: claim the chip-select GPIO for the current
+ * slave (legacy, non-pinctrl boards only). Skipped entirely in loopback
+ * mode. Marks the CS entry valid so it is freed later. Returns 0 or the
+ * gpio_request() error.
+ */
+static inline int msm_spi_request_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+	int rc;
+
+	cs_num = dd->spi->chip_select;
+	if (!(dd->spi->mode & SPI_LOOP)) {
+		if (!dd->pdata->use_pinctrl) {
+			if ((!(dd->cs_gpios[cs_num].valid)) &&
+				(dd->cs_gpios[cs_num].gpio_num >= 0)) {
+				rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
+					spi_cs_rsrcs[cs_num]);
+
+				if (rc) {
+					dev_err(dd->dev,
+					"gpio_request for pin %d failed,error %d\n",
+					dd->cs_gpios[cs_num].gpio_num, rc);
+					return rc;
+				}
+				dd->cs_gpios[cs_num].valid = true;
+			}
+		}
+	}
+	return 0;
+}
+
+/* msm_spi_free_cs_gpio: release the current slave's CS GPIO if claimed. */
+static inline void msm_spi_free_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+
+	cs_num = dd->spi->chip_select;
+	if (!dd->pdata->use_pinctrl) {
+		if (dd->cs_gpios[cs_num].valid) {
+			gpio_free(dd->cs_gpios[cs_num].gpio_num);
+			dd->cs_gpios[cs_num].valid = false;
+		}
+	}
+}
+
+
+/**
+ * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
+ * @clk the clock for which to find nearest lower rate
+ * @rate clock frequency in Hz
+ * @return nearest lower rate or negative error value
+ *
+ * Public clock API extends clk_round_rate which is a ceiling function. This
+ * function is a floor function implemented as a binary search using the
+ * ceiling function.
+ */
+static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
+{
+	long lowest_available, nearest_low, step_size, cur;
+	long step_direction = -1;
+	long guess = rate;
+	int  max_steps = 10;
+
+	cur =  clk_round_rate(clk, rate);
+	if (cur == rate)
+		return rate;
+
+	/* if we got here then: cur > rate */
+	lowest_available =  clk_round_rate(clk, 0);
+	if (lowest_available > rate)
+		return -EINVAL;
+
+	step_size = (rate - lowest_available) >> 1;
+	nearest_low = lowest_available;
+
+	/* binary search: at most 10 probes of clk_round_rate() */
+	while (max_steps-- && step_size) {
+		guess += step_size * step_direction;
+
+		cur =  clk_round_rate(clk, guess);
+
+		if ((cur < rate) && (cur > nearest_low))
+			nearest_low = cur;
+
+		/*
+		 * if we stepped too far, then start stepping in the other
+		 * direction with half the step size
+		 */
+		if (((cur > rate) && (step_direction > 0))
+		 || ((cur < rate) && (step_direction < 0))) {
+			step_direction = -step_direction;
+			step_size >>= 1;
+		}
+	}
+	return nearest_low;
+}
+
+/*
+ * msm_spi_clock_set: program the core clock to the nearest supported rate
+ * at or below @speed; caches the achieved rate in dd->clock_speed. On any
+ * failure the old rate is kept and only an error is logged.
+ */
+static void msm_spi_clock_set(struct msm_spi *dd, int speed)
+{
+	long rate;
+	int rc;
+
+	rate = msm_spi_clk_max_rate(dd->clk, speed);
+	if (rate < 0) {
+		dev_err(dd->dev,
+		"%s: no match found for requested clock frequency:%d\n",
+			__func__, speed);
+		return;
+	}
+
+	rc = clk_set_rate(dd->clk, rate);
+	if (!rc)
+		dd->clock_speed = rate;
+}
+
+/*
+ * msm_spi_clk_path_vote: cast an instantaneous-bandwidth (ib) vote of
+ * rate * bus_width bytes/s on the registered bus-scaling client; a rate of
+ * 0 removes the vote. No-op if the client was never registered.
+ */
+static void msm_spi_clk_path_vote(struct msm_spi *dd, u32 rate)
+{
+	if (dd->bus_cl_hdl) {
+		u64 ib = rate * dd->pdata->bus_width;
+
+		msm_bus_scale_update_bw(dd->bus_cl_hdl, 0, ib);
+	}
+}
+
+/*
+ * msm_spi_clk_path_teardown: drop any outstanding bandwidth vote and
+ * unregister the bus-scaling client.
+ */
+static void msm_spi_clk_path_teardown(struct msm_spi *dd)
+{
+	msm_spi_clk_path_vote(dd, 0);
+
+	if (dd->bus_cl_hdl) {
+		msm_bus_scale_unregister(dd->bus_cl_hdl);
+		dd->bus_cl_hdl = NULL;
+	}
+}
+
+/**
+ * msm_spi_clk_path_postponed_register: reg with bus-scaling after it is probed
+ *
+ * @return zero on success
+ *
+ * Workaround: SPI driver may be probed before the bus scaling driver. Calling
+ * msm_bus_scale_register_client() will fail if the bus scaling driver is not
+ * ready yet. Thus, this function should be called not from probe but from a
+ * later context. Also, this function may be called more than once before
+ * registration succeeds; in that case only one error message is logged. At boot
+ * time all clocks are on, so earlier SPI transactions should succeed.
+ */
+static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
+{
+	int ret = 0;
+
+	dd->bus_cl_hdl = msm_bus_scale_register(dd->pdata->master_id,
+						MSM_BUS_SLAVE_EBI_CH0,
+						(char *)dev_name(dd->dev),
+						false);
+
+	if (IS_ERR_OR_NULL(dd->bus_cl_hdl)) {
+		ret = (dd->bus_cl_hdl ? PTR_ERR(dd->bus_cl_hdl) : -EAGAIN);
+		dev_err(dd->dev, "Failed bus registration Err %d\n", ret);
+	}
+
+	return ret;
+}
+
+static void msm_spi_clk_path_init(struct msm_spi *dd)
+{
+	/*
+	 * bail out if path voting is disabled (master_id == 0) or if it is
+	 * already registered (bus_cl_hdl != NULL)
+	 */
+	if (!dd->pdata->master_id || dd->bus_cl_hdl)
+		return;
+
+	/* on failure try again later */
+	if (msm_spi_clk_path_postponed_register(dd))
+		return;
+
+}
+
+/*
+ * msm_spi_calculate_size: decode the block/fifo size fields read from
+ * SPI_IO_MODES into byte counts.
+ * @fifo_size: out - FIFO depth in words
+ * @block_size: out - block size in bytes
+ * @block: 2-bit encoded block size (0->1, 1->4, 2->8 words)
+ * @mult: 2-bit encoded FIFO multiplier (x2/x4/x8/x16 blocks)
+ * Returns 0, or -EINVAL for a reserved encoding.
+ */
+static int msm_spi_calculate_size(int *fifo_size,
+				  int *block_size,
+				  int block,
+				  int mult)
+{
+	int words;
+
+	switch (block) {
+	case 0:
+		words = 1; /* 4 bytes */
+		break;
+	case 1:
+		words = 4; /* 16 bytes */
+		break;
+	case 2:
+		words = 8; /* 32 bytes */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (mult) {
+	case 0:
+		*fifo_size = words * 2;
+		break;
+	case 1:
+		*fifo_size = words * 4;
+		break;
+	case 2:
+		*fifo_size = words * 8;
+		break;
+	case 3:
+		*fifo_size = words * 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*block_size = words * sizeof(u32); /* in bytes */
+	return 0;
+}
+
+/*
+ * msm_spi_calculate_fifo_size: read the hardware-reported input/output
+ * FIFO and block sizes from SPI_IO_MODES and cache them in @dd. On a
+ * reserved encoding, or on legacy QUP with 4-byte blocks, DMA is disabled.
+ */
+static void msm_spi_calculate_fifo_size(struct msm_spi *dd)
+{
+	u32 spi_iom;
+	int block;
+	int mult;
+
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+
+	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
+	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
+	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
+				   block, mult)) {
+		goto fifo_size_err;
+	}
+
+	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
+	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
+	if (msm_spi_calculate_size(&dd->output_fifo_size,
+				   &dd->output_block_size, block, mult)) {
+		goto fifo_size_err;
+	}
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+		/* DM mode is not available for this block size */
+		if (dd->input_block_size == 4 || dd->output_block_size == 4)
+			dd->use_dma = false;
+
+		if (dd->use_dma) {
+			dd->input_burst_size = max(dd->input_block_size,
+						DM_BURST_SIZE);
+			dd->output_burst_size = max(dd->output_block_size,
+						DM_BURST_SIZE);
+		}
+	}
+
+	return;
+
+fifo_size_err:
+	dd->use_dma = false;
+	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
+}
+
+/*
+ * msm_spi_read_word_from_fifo: pop one word from the input FIFO and unpack
+ * up to read_bytes of it into dd->read_buf (or just account for the bytes
+ * when there is no read buffer, i.e. a write-only transfer).
+ */
+static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
+{
+	u32   data_in;
+	int   i;
+	int   shift;
+	int   read_bytes = (dd->pack_words ?
+				SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
+
+	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
+	if (dd->read_buf) {
+		for (i = 0; (i < read_bytes) &&
+			     dd->rx_bytes_remaining; i++) {
+			/* The data format depends on bytes_per_word:
+			 * 4 bytes: 0x12345678
+			 * 3 bytes: 0x00123456
+			 * 2 bytes: 0x00001234
+			 * 1 byte : 0x00000012
+			 */
+			shift = BITS_PER_BYTE * i;
+			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
+			dd->rx_bytes_remaining--;
+		}
+	} else {
+		if (dd->rx_bytes_remaining >= read_bytes)
+			dd->rx_bytes_remaining -= read_bytes;
+		else
+			dd->rx_bytes_remaining = 0;
+	}
+
+	dd->read_xfr_cnt++;
+}
+
+/* True when the VALID bit of SPI_STATE is set (state machine settled). */
+static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
+{
+	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
+
+	return spi_op & SPI_OP_STATE_VALID;
+}
+
+static inline void msm_spi_udelay(unsigned int delay_usecs)
+{
+	/*
+	 * For smaller values of delay, context switch time
+	 * would negate the usage of usleep
+	 */
+	if (delay_usecs > 20)
+		usleep_range(delay_usecs, delay_usecs + 1);
+	else if (delay_usecs)
+		udelay(delay_usecs);
+}
+
+/*
+ * msm_spi_wait_valid: poll until the QUP state machine reports a valid
+ * state. The poll interval is derived from the SPI clock (at least
+ * SPI_DELAY_THRESHOLD us); gives up with -ETIMEDOUT after roughly
+ * delay * SPI_DEFAULT_TIMEOUT ms. Returns -EINVAL if the clock was never
+ * programmed (clock_speed == 0).
+ */
+static inline int msm_spi_wait_valid(struct msm_spi *dd)
+{
+	unsigned int delay = 0;
+	unsigned long timeout = 0;
+
+	if (dd->clock_speed == 0)
+		return -EINVAL;
+	/*
+	 * Based on the SPI clock speed, sufficient time
+	 * should be given for the SPI state transition
+	 * to occur
+	 */
+	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
+	/*
+	 * For small delay values, the default timeout would
+	 * be one jiffy
+	 */
+	if (delay < SPI_DELAY_THRESHOLD)
+		delay = SPI_DELAY_THRESHOLD;
+
+	/* Adding one to round off to the nearest jiffy */
+	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
+	while (!msm_spi_is_valid_state(dd)) {
+		if (time_after(jiffies, timeout)) {
+			/* re-check once: the state may have settled just now */
+			if (!msm_spi_is_valid_state(dd)) {
+				dev_err(dd->dev, "Invalid SPI operational state\n");
+				return -ETIMEDOUT;
+			} else
+				return 0;
+		}
+		msm_spi_udelay(delay);
+	}
+	return 0;
+}
+
+/*
+ * msm_spi_set_state: move the QUP state machine to @state, waiting for the
+ * VALID bit before and after the transition. Returns 0 or -EIO on timeout.
+ */
+static inline int msm_spi_set_state(struct msm_spi *dd,
+				    enum msm_spi_state state)
+{
+	enum msm_spi_state cur_state;
+
+	if (msm_spi_wait_valid(dd))
+		return -EIO;
+	cur_state = readl_relaxed(dd->base + SPI_STATE);
+	/* Per spec:
+	 * For PAUSE_STATE to RESET_STATE, two writes of (10) are required
+	 */
+	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
+			(state == SPI_OP_STATE_RESET)) {
+		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+	} else {
+		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
+		       dd->base + SPI_STATE);
+	}
+	if (msm_spi_wait_valid(dd))
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
+ *
+ * @n is the bits-per-word value minus one, as the SPI_CFG_N field expects.
+ * The NO_INPUT/NO_OUTPUT flags are only meaningful for BAM transfers where
+ * one direction may have no buffer.
+ */
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
+{
+	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
+
+	if (n != (*config & SPI_CFG_N))
+		*config = (*config & ~SPI_CFG_N) | n;
+
+	if (dd->tx_mode == SPI_BAM_MODE) {
+		if (dd->read_buf == NULL)
+			*config |= SPI_NO_INPUT;
+		if (dd->write_buf == NULL)
+			*config |= SPI_NO_OUTPUT;
+	}
+}
+
+/**
+ * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
+ * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
+ * @return calculated value for SPI_CONFIG
+ */
+static u32
+msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
+{
+	if (mode & SPI_LOOP)
+		spi_config |= SPI_CFG_LOOPBACK;
+	else
+		spi_config &= ~SPI_CFG_LOOPBACK;
+
+	/* INPUT_FIRST corresponds to CPHA=0: sample on the leading edge */
+	if (mode & SPI_CPHA)
+		spi_config &= ~SPI_CFG_INPUT_FIRST;
+	else
+		spi_config |= SPI_CFG_INPUT_FIRST;
+
+	return spi_config;
+}
+
+/**
+ * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
+ * next transfer
+ */
+static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
+{
+	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+
+	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+					spi_config, dd->spi->mode);
+
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
+		/* flags removed from SPI_CONFIG in QUP version-2 */
+		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
+
+	/*
+	 * HS_MODE improves signal stability for spi-clk high rates
+	 * but is invalid in LOOPBACK mode.
+	 */
+	if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
+	   !(dd->spi->mode & SPI_LOOP))
+		spi_config |= SPI_CFG_HS_MODE;
+	else
+		spi_config &= ~SPI_CFG_HS_MODE;
+
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+}
+
+/**
+ * msm_spi_set_mx_counts: program the transfer-count registers.
+ * For FIFO mode the MX_INPUT/MX_OUTPUT counts are zeroed and the
+ * READ/WRITE counts carry the word count (0 = infinite); for BAM and
+ * DMOV modes the MX_INPUT/MX_OUTPUT counts are used instead.
+ * @n_words The number of reads/writes of size N.
+ */
+static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
+{
+	/*
+	 * For FIFO mode:
+	 *   - Set the MX_OUTPUT_COUNT/MX_INPUT_COUNT registers to 0
+	 *   - Set the READ/WRITE_COUNT registers to 0 (infinite mode)
+	 *     or num bytes (finite mode) if less than fifo worth of data.
+	 * For Block mode:
+	 *  - Set the MX_OUTPUT/MX_INPUT_COUNT registers to num xfer bytes.
+	 *  - Set the READ/WRITE_COUNT registers to 0.
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (dd->tx_mode == SPI_FIFO_MODE) {
+			/*
+			 * NOTE(review): the write path compares against
+			 * input_fifo_size, not output_fifo_size — confirm
+			 * this is intentional (sizes may match on this HW).
+			 */
+			if (n_words <= dd->input_fifo_size)
+				msm_spi_set_write_count(dd, n_words);
+			else
+				msm_spi_set_write_count(dd, 0);
+			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+		} else
+			writel_relaxed(n_words, dd->base + SPI_MX_OUTPUT_COUNT);
+
+		if (dd->rx_mode == SPI_FIFO_MODE) {
+			if (n_words <= dd->input_fifo_size)
+				writel_relaxed(n_words,
+						dd->base + SPI_MX_READ_COUNT);
+			else
+				writel_relaxed(0,
+						dd->base + SPI_MX_READ_COUNT);
+			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
+		} else
+			writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT);
+	} else {
+		/* must be zero for BAM and DMOV */
+		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+		msm_spi_set_write_count(dd, 0);
+
+		/*
+		 * for DMA transfers, both QUP_MX_INPUT_COUNT and
+		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
+		 * That case is a non-balanced transfer when there is
+		 * only a read_buf.
+		 */
+		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+			if (dd->write_buf)
+				writel_relaxed(0,
+						dd->base + SPI_MX_INPUT_COUNT);
+			else
+				writel_relaxed(n_words,
+						dd->base + SPI_MX_INPUT_COUNT);
+
+			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+		}
+	}
+}
+
+/* Tear down one SPS/BAM pipe; logs (dev_dbg) and returns the sps error. */
+static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
+						struct msm_spi_bam_pipe  *pipe)
+{
+	int ret = sps_disconnect(pipe->handle);
+
+	if (ret) {
+		dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
+			__func__, pipe->name);
+		return ret;
+	}
+	return 0;
+}
+
+/*
+ * msm_spi_bam_pipe_connect: connect an SPS/BAM pipe and register an EOT
+ * event that completes the matching rx/tx completion. On event-registration
+ * failure the pipe is disconnected again before returning the error.
+ */
+static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
+		struct msm_spi_bam_pipe  *pipe, struct sps_connect *config)
+{
+	int ret;
+	struct sps_register_event event  = {
+		.mode      = SPS_TRIGGER_WAIT,
+		.options   = SPS_O_EOT,
+	};
+
+	/* producer pipe signals rx completion, consumer pipe signals tx */
+	if (pipe == &dd->bam.prod)
+		event.xfer_done = &dd->rx_transfer_complete;
+	else if (pipe == &dd->bam.cons)
+		event.xfer_done = &dd->tx_transfer_complete;
+
+	ret = sps_connect(pipe->handle, config);
+	if (ret) {
+		dev_err(dd->dev, "%s: sps_connect(%s:0x%pK):%d\n",
+				__func__, pipe->name, pipe->handle, ret);
+		return ret;
+	}
+
+	ret = sps_register_event(pipe->handle, &event);
+	if (ret) {
+		dev_err(dd->dev, "%s sps_register_event(hndl:0x%pK %s):%d\n",
+				__func__, pipe->handle, pipe->name, ret);
+		msm_spi_bam_pipe_disconnect(dd, pipe);
+		return ret;
+	}
+
+	pipe->teardown_required = true;
+	return 0;
+}
+
+
+/*
+ * msm_spi_bam_pipe_flush: recover a pipe by disconnecting and reconnecting
+ * it with its saved configuration.
+ * NOTE(review): CONSUMER_PIPE selects &dd->bam.prod here (and vice versa),
+ * which looks inverted relative to the naming — confirm against the SPS
+ * pipe-direction convention before changing anything.
+ */
+static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	struct sps_connect           config  = pipe->config;
+	int    ret;
+
+	ret = msm_spi_bam_pipe_disconnect(dd, pipe);
+	if (ret)
+		return;
+
+	ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
+	if (ret)
+		return;
+}
+
+/* Flush both BAM pipes as part of error recovery. */
+static void msm_spi_bam_flush(struct msm_spi *dd)
+{
+	dev_dbg(dd->dev, "%s flushing bam for recovery\n", __func__);
+
+	msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
+	msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
+}
+
+/*
+ * msm_spi_bam_process_rx: queue one read descriptor on the producer pipe,
+ * covering min(bytes left in the current rx transfer, *bytes_to_send).
+ * Updates the per-transfer and whole-message byte accounting. Returns the
+ * number of bytes queued, or a negative sps error.
+ */
+static int
+msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 prod_flags = 0;
+
+	rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		prod_flags = (dd->write_buf)
+			? 0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+
+	/*
+	 * enqueue read buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.prod.handle,
+			dd->cur_rx_transfer->rx_dma
+				+ dd->bam.curr_rx_bytes_recvd,
+			data_xfr_size, dd, prod_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue producer BAM transfer\n",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_rx_bytes_recvd += data_xfr_size;
+	*bytes_to_send -= data_xfr_size;
+	dd->bam.bam_rx_len -= data_xfr_size;
+	return data_xfr_size;
+}
+
+/*
+ * msm_spi_bam_process_tx: queue one write descriptor on the consumer pipe;
+ * mirror image of msm_spi_bam_process_rx. Returns bytes queued or a
+ * negative sps error.
+ */
+static int
+msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 cons_flags = 0;
+
+	rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
+
+	/*
+	 * enqueue write buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.cons.handle,
+			dd->cur_tx_transfer->tx_dma
+				+ dd->bam.curr_tx_bytes_sent,
+			data_xfr_size, dd, cons_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue consumer BAM transfer\n",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_tx_bytes_sent	+= data_xfr_size;
+	*bytes_to_send	-= data_xfr_size;
+	dd->bam.bam_tx_len -= data_xfr_size;
+	return data_xfr_size;
+}
+
+
+/**
+ * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
+ * using BAM.
+ * @brief BAM can transfer SPI_MAX_TRFR_BTWN_RESETS byte at a single
+ * transfer. Between transfer QUP must change to reset state. A loop is
+ * issuing a single BAM transfer at a time.
+ * @return zero on success
+ */
+static int
+msm_spi_bam_begin_transfer(struct msm_spi *dd)
+{
+	u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0;
+	u32 n_words_xfr;
+	s32 ret = 0;
+	/* one descriptor slot per pipe is kept in reserve */
+	u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 byte_count = 0;
+
+	rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv,
+				dd->bytes_per_word);
+
+	msm_spi_set_mx_counts(dd, n_words_xfr);
+	ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
+	if (ret < 0) {
+		dev_err(dd->dev,
+			"%s: Failed to set QUP state to run\n",
+			__func__);
+		goto xfr_err;
+	}
+
+	/* queue rx/tx descriptors until this chunk is covered or slots run out */
+	while ((rx_bytes_to_recv + tx_bytes_to_send) &&
+		((cons_desc_cnt + prod_desc_cnt) > 0)) {
+		struct spi_transfer *t = NULL;
+
+		if (dd->read_buf && (prod_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv,
+							prod_desc_cnt);
+			if (ret < 0)
+				goto xfr_err;
+
+			if (!(dd->cur_rx_transfer->len
+				- dd->bam.curr_rx_bytes_recvd))
+				t = dd->cur_rx_transfer;
+			prod_desc_cnt--;
+		}
+
+		if (dd->write_buf && (cons_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send,
+							cons_desc_cnt);
+			if (ret < 0)
+				goto xfr_err;
+
+			if (!(dd->cur_tx_transfer->len
+				- dd->bam.curr_tx_bytes_sent))
+				t = dd->cur_tx_transfer;
+			cons_desc_cnt--;
+		}
+
+		byte_count += ret;
+	}
+
+	dd->tx_bytes_remaining -= min_t(u32, byte_count,
+						SPI_MAX_TRFR_BTWN_RESETS);
+	return 0;
+xfr_err:
+	return ret;
+}
+
+/*
+ * msm_spi_bam_next_transfer: if bytes remain, reset the QUP and start the
+ * next BAM chunk. Returns 1 when another chunk was started, 0 otherwise
+ * (including on error, which is only logged here).
+ */
+static int
+msm_spi_bam_next_transfer(struct msm_spi *dd)
+{
+	if (dd->tx_mode != SPI_BAM_MODE)
+		return 0;
+
+	if (dd->tx_bytes_remaining > 0) {
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+			return 0;
+		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
+			return 0;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+/* Dispatch the next DMA chunk; only BAM mode is handled here. */
+static int msm_spi_dma_send_next(struct msm_spi *dd)
+{
+	int ret = 0;
+
+	if (dd->tx_mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_next_transfer(dd);
+	return ret;
+}
+
+/* Acknowledge both MAX_INPUT/MAX_OUTPUT done flags in SPI_OPERATIONAL. */
+static inline void msm_spi_ack_transfer(struct msm_spi *dd)
+{
+	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
+		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
+		       dd->base + SPI_OPERATIONAL);
+	/* Ensure done flag was cleared before proceeding further */
+	mb();
+}
+
+/* Figure which irq occurred and call the relevant functions */
+static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
+{
+	u32 op, ret = IRQ_NONE;
+	struct msm_spi *dd = dev_id;
+
+	/* spurious interrupt while suspended: clocks may be off, bail out */
+	if (pm_runtime_suspended(dd->dev)) {
+		dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
+		return ret;
+	}
+	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
+	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
+		struct spi_master *master = dev_get_drvdata(dd->dev);
+
+		ret |= msm_spi_error_irq(irq, master);
+	}
+
+	/* read-then-write-back clears the asserted service flags */
+	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
+	writel_relaxed(op, dd->base + SPI_OPERATIONAL);
+	/*
+	 * Ensure service flag was cleared before further
+	 * processing of interrupt.
+	 */
+	mb();
+	if (op & SPI_OP_INPUT_SERVICE_FLAG)
+		ret |= msm_spi_input_irq(irq, dev_id);
+
+	if (op & SPI_OP_OUTPUT_SERVICE_FLAG)
+		ret |= msm_spi_output_irq(irq, dev_id);
+
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (!dd->rx_done) {
+			if (dd->rx_bytes_remaining == 0)
+				dd->rx_done = true;
+		}
+		if (!dd->tx_done) {
+			if (!dd->tx_bytes_remaining &&
+					(op & SPI_OP_IP_FIFO_NOT_EMPTY)) {
+				dd->tx_done = true;
+			}
+		}
+	}
+	/* both directions drained: reset the QUP and wake the waiter */
+	if (dd->tx_done && dd->rx_done) {
+		msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+		dd->tx_done = false;
+		dd->rx_done = false;
+		complete(&dd->rx_transfer_complete);
+		complete(&dd->tx_transfer_complete);
+	}
+	return ret;
+}
+
+/*
+ * msm_spi_input_irq: drain the input FIFO. In FIFO mode read while data is
+ * available; in block mode read exactly one block's worth.
+ */
+static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
+{
+	struct msm_spi	       *dd = dev_id;
+
+	dd->stat_rx++;
+
+	if (dd->rx_mode == SPI_MODE_NONE)
+		return IRQ_HANDLED;
+
+	if (dd->rx_mode == SPI_FIFO_MODE) {
+		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
+			SPI_OP_IP_FIFO_NOT_EMPTY) &&
+			(dd->rx_bytes_remaining > 0)) {
+			msm_spi_read_word_from_fifo(dd);
+		}
+	} else if (dd->rx_mode == SPI_BLOCK_MODE) {
+		int count = 0;
+
+		while (dd->rx_bytes_remaining &&
+				(count < dd->input_block_size)) {
+			msm_spi_read_word_from_fifo(dd);
+			count += SPI_MAX_BYTES_PER_WORD;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * msm_spi_write_word_to_fifo: pack up to write_bytes from dd->write_buf
+ * (little-endian within the word) and push one word to the output FIFO.
+ * With no write buffer, a zero word is sent and bytes are just accounted.
+ */
+static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
+{
+	u32    word;
+	u8     byte;
+	int    i;
+	int   write_bytes =
+		(dd->pack_words ? SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
+
+	word = 0;
+	if (dd->write_buf) {
+		for (i = 0; (i < write_bytes) &&
+			     dd->tx_bytes_remaining; i++) {
+			dd->tx_bytes_remaining--;
+			byte = *dd->write_buf++;
+			word |= (byte << (BITS_PER_BYTE * i));
+		}
+	} else
+		if (dd->tx_bytes_remaining > write_bytes)
+			dd->tx_bytes_remaining -= write_bytes;
+		else
+			dd->tx_bytes_remaining = 0;
+	dd->write_xfr_cnt++;
+
+	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
+}
+
+/*
+ * msm_spi_write_rmn_to_fifo: refill the output FIFO with the remaining tx
+ * bytes, bounded by FIFO fullness (FIFO mode) or one block (block mode).
+ */
+static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
+{
+	int count = 0;
+
+	if (dd->tx_mode == SPI_FIFO_MODE) {
+		/*
+		 * NOTE(review): loop bound uses input_fifo_size on the tx
+		 * path — confirm intended (sizes may be equal on this HW).
+		 */
+		while ((dd->tx_bytes_remaining > 0) &&
+			(count < dd->input_fifo_size) &&
+		       !(readl_relaxed(dd->base + SPI_OPERATIONAL)
+						& SPI_OP_OUTPUT_FIFO_FULL)) {
+			msm_spi_write_word_to_fifo(dd);
+			count++;
+		}
+	}
+
+	if (dd->tx_mode == SPI_BLOCK_MODE) {
+		while (dd->tx_bytes_remaining &&
+				(count < dd->output_block_size)) {
+			msm_spi_write_word_to_fifo(dd);
+			count += SPI_MAX_BYTES_PER_WORD;
+		}
+	}
+}
+
+/* Output-service interrupt: top up the output FIFO. */
+static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
+{
+	struct msm_spi	       *dd = dev_id;
+
+	dd->stat_tx++;
+
+	if (dd->tx_mode == SPI_MODE_NONE)
+		return IRQ_HANDLED;
+
+	/* Output FIFO is empty. Transmit any outstanding write data. */
+	if ((dd->tx_mode == SPI_FIFO_MODE) || (dd->tx_mode == SPI_BLOCK_MODE))
+		msm_spi_write_rmn_to_fifo(dd);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * msm_spi_error_irq: decode and log SPI/QUP error flags, then clear them.
+ * Purely diagnostic; the transfer is not aborted here.
+ */
+static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
+{
+	struct spi_master	*master = dev_id;
+	struct msm_spi          *dd = spi_master_get_devdata(master);
+	u32                      spi_err;
+
+	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
+	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI output overrun error\n");
+	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI input underrun error\n");
+	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI output underrun error\n");
+	msm_spi_get_clk_err(dd, &spi_err);
+	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI clock overrun error\n");
+	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI clock underrun error\n");
+	msm_spi_clear_error_flags(dd);
+	msm_spi_ack_clk_err(dd);
+	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
+	mb();
+	return IRQ_HANDLED;
+}
+
+/*
+ * msm_spi_bam_map_buffers: DMA-map the tx and/or rx buffers of the current
+ * transfer for BAM. On rx-map failure the already-mapped tx buffer is
+ * unmapped before returning -ENOMEM.
+ */
+static int msm_spi_bam_map_buffers(struct msm_spi *dd)
+{
+	int ret = -EINVAL;
+	struct device *dev;
+	struct spi_transfer *xfr;
+	void *tx_buf, *rx_buf;
+	u32 tx_len, rx_len;
+
+	dev = dd->dev;
+	xfr = dd->cur_transfer;
+
+	tx_buf = (void *)xfr->tx_buf;
+	rx_buf = xfr->rx_buf;
+	tx_len = rx_len = xfr->len;
+	if (tx_buf != NULL) {
+		xfr->tx_dma = dma_map_single(dev, tx_buf,
+						tx_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, xfr->tx_dma)) {
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+
+	if (rx_buf != NULL) {
+		xfr->rx_dma = dma_map_single(dev, rx_buf, rx_len,
+						DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, xfr->rx_dma)) {
+			if (tx_buf != NULL)
+				dma_unmap_single(dev,
+						xfr->tx_dma,
+						tx_len, DMA_TO_DEVICE);
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	msm_spi_dma_unmap_buffers(dd);
+	return ret;
+}
+
+/* Map buffers for the active DMA flavor (only BAM is supported here). */
+static int msm_spi_dma_map_buffers(struct msm_spi *dd)
+{
+	int ret = 0;
+
+	if (dd->tx_mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_map_buffers(dd);
+	return ret;
+}
+
+/* Undo msm_spi_bam_map_buffers for the current transfer. */
+static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
+{
+	struct device *dev;
+	struct spi_transfer *xfr;
+	void *tx_buf, *rx_buf;
+	u32  tx_len, rx_len;
+
+	dev = dd->dev;
+	xfr = dd->cur_transfer;
+
+	tx_buf = (void *)xfr->tx_buf;
+	rx_buf = xfr->rx_buf;
+	tx_len = rx_len = xfr->len;
+	if (tx_buf != NULL)
+		dma_unmap_single(dev, xfr->tx_dma,
+				tx_len, DMA_TO_DEVICE);
+
+	if (rx_buf != NULL)
+		dma_unmap_single(dev, xfr->rx_dma,
+				rx_len, DMA_FROM_DEVICE);
+}
+
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
+{
+	if (dd->tx_mode == SPI_BAM_MODE)
+		msm_spi_bam_unmap_buffers(dd);
+}
+
+/**
+ * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
+ * the given transfer
+ * @dd:       device
+ * @tr:       transfer
+ *
+ * Start using DMA if:
+ * 1. Is supported by HW
+ * 2. Is not disabled by platform data
+ * 3. Transfer size is greater than 3*block size.
+ * 4. Buffers are aligned to cache line.
+ * 5. Bytes-per-word is 8,16 or 32.
+ */
+static inline bool
+msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
+{
+	if (!dd->use_dma)
+		return false;
+
+	/* check constraints from platform data */
+	if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
+		return false;
+
+	if (dd->cur_msg_len < 3*dd->input_block_size)
+		return false;
+
+	if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) &&
+		 !dd->read_len && !dd->write_len)
+		return false;
+
+	/* legacy DMOV has stricter alignment and word-size requirements */
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+		u32 cache_line = dma_get_cache_alignment();
+
+		if (tr->tx_buf) {
+			if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
+				return false;
+		}
+		if (tr->rx_buf) {
+			if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
+				return false;
+		}
+
+		if (tr->cs_change &&
+		   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
+ * prepares to process a transfer.
+ *
+ * BAM (DMA) is selected when msm_spi_use_dma() permits it; otherwise both
+ * directions fall back to FIFO mode and the full transfer length is used
+ * as the read/write byte count.
+ * NOTE(review): @read_count is currently unused in this function —
+ * confirm whether callers still need to pass it.
+ */
+static void
+msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
+{
+	if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
+		dd->tx_mode = SPI_BAM_MODE;
+		dd->rx_mode = SPI_BAM_MODE;
+	} else {
+		dd->rx_mode = SPI_FIFO_MODE;
+		dd->tx_mode = SPI_FIFO_MODE;
+		dd->read_len = dd->cur_transfer->len;
+		dd->write_len = dd->cur_transfer->len;
+	}
+}
+
+/**
+ * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
+ * transfer
+ *
+ * Selects FIFO/BAM input+output modes from dd->tx_mode/rx_mode and
+ * decides whether word packing can be used; dd->pack_words records the
+ * choice for the FIFO read/write paths.
+ */
+static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
+{
+	u32 spi_iom;
+
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+	/* Set input and output transfer mode: FIFO, DMOV, or BAM */
+	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
+	spi_iom = (spi_iom | (dd->tx_mode << OUTPUT_MODE_SHIFT));
+	spi_iom = (spi_iom | (dd->rx_mode << INPUT_MODE_SHIFT));
+
+	/* Always enable packing for the BAM mode and for non BAM mode only
+	 * if bpw is % 8 and transfer length is % 4 Bytes.
+	 */
+	if (dd->tx_mode == SPI_BAM_MODE ||
+		((dd->cur_msg_len % SPI_MAX_BYTES_PER_WORD == 0) &&
+		(dd->cur_transfer->bits_per_word) &&
+		(dd->cur_transfer->bits_per_word <= 32) &&
+		(dd->cur_transfer->bits_per_word % 8 == 0))) {
+		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
+		dd->pack_words = true;
+	} else {
+		/* unpacked: output bits are shifted instead of packed */
+		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
+		spi_iom |= SPI_IO_M_OUTPUT_BIT_SHIFT_EN;
+		dd->pack_words = false;
+	}
+
+	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+}
+
+/*
+ * Return @spi_ioc with the clock-idle-polarity bit set according to the
+ * SPI_CPOL flag in @mode; all other bits are preserved.
+ */
+static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
+{
+	spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+	if (mode & SPI_CPOL)
+		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
+	return spi_ioc;
+}
+
+/**
+ * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
+ * next transfer
+ * @return the new set value of SPI_IO_CONTROL
+ *
+ * Programs clock polarity and chip-select for dd->spi; MX_CS_MODE keeps
+ * CS asserted between words when the transfer does not request
+ * cs_change. The register is rewritten only when its value changes.
+ */
+static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
+{
+	u32 spi_ioc, spi_ioc_orig, chip_select;
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc_orig = spi_ioc;
+	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
+						, dd->spi->mode);
+	/* Set chip-select */
+	chip_select = dd->spi->chip_select << 2;
+	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
+		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
+	if (!dd->cur_transfer->cs_change)
+		spi_ioc |= SPI_IO_C_MX_CS_MODE;
+
+	if (spi_ioc != spi_ioc_orig)
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	/*
+	 * Ensure that the IO control mode register gets written
+	 * before proceeding with the transfer.
+	 */
+	mb();
+	return spi_ioc;
+}
+
+/**
+ * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
+ * the next transfer
+ *
+ * In BAM mode the input/output service interrupts are masked so FIFO
+ * status changes do not raise IRQs; in every other mode nothing is masked.
+ */
+static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
+{
+	u32 mask = 0;
+
+	if (dd->tx_mode == SPI_BAM_MODE)
+		mask = QUP_OP_MASK_OUTPUT_SERVICE_FLAG |
+			QUP_OP_MASK_INPUT_SERVICE_FLAG;
+
+	writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
+}
+
+/*
+ * Derive per-direction byte counts for the current transfer: each
+ * direction's FIFO and BAM lengths are set only when its buffer exists.
+ */
+static void get_transfer_length(struct msm_spi *dd)
+{
+	struct spi_transfer *xfer = dd->cur_transfer;
+	u32 len = xfer->len;
+
+	dd->read_len = 0;
+	dd->write_len = 0;
+	dd->bam.bam_tx_len = 0;
+	dd->bam.bam_rx_len = 0;
+
+	if (xfer->tx_buf) {
+		dd->write_len = len;
+		dd->bam.bam_tx_len = len;
+	}
+	if (xfer->rx_buf) {
+		dd->read_len = len;
+		dd->bam.bam_rx_len = len;
+	}
+	dd->cur_msg_len = len;
+}
+
+/**
+ * msm_spi_process_transfer - execute the transfer in dd->cur_transfer
+ * @dd: driver context with cur_transfer, spi and pdata already set
+ *
+ * Resets the per-transfer bookkeeping, programs clock speed, transfer
+ * mode (FIFO vs BAM), the QUP/SPI registers, then starts the transfer
+ * and waits for Tx/Rx completion with a timeout of 100x the theoretical
+ * transfer time. Returns 0 on success or a negative errno.
+ *
+ * Fix: removed the dead local `int_loopback` (it was set from SPI_LOOP
+ * but never read).
+ */
+static int msm_spi_process_transfer(struct msm_spi *dd)
+{
+	u8  bpw;
+	u32 max_speed;
+	u32 read_count;
+	u32 timeout;
+	u32 spi_ioc;
+	int ret;
+	int status = 0;
+
+	get_transfer_length(dd);
+	dd->cur_tx_transfer = dd->cur_transfer;
+	dd->cur_rx_transfer = dd->cur_transfer;
+	dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0;
+	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
+	dd->tx_bytes_remaining = dd->cur_msg_len;
+	dd->rx_bytes_remaining = dd->cur_msg_len;
+	dd->read_buf           = dd->cur_transfer->rx_buf;
+	dd->write_buf          = dd->cur_transfer->tx_buf;
+	dd->tx_done = false;
+	dd->rx_done = false;
+	init_completion(&dd->tx_transfer_complete);
+	init_completion(&dd->rx_transfer_complete);
+	/* default to 8 bits-per-word when the transfer does not specify */
+	if (dd->cur_transfer->bits_per_word)
+		bpw = dd->cur_transfer->bits_per_word;
+	else
+		bpw = 8;
+	dd->bytes_per_word = (bpw + 7) / 8;
+
+	if (dd->cur_transfer->speed_hz)
+		max_speed = dd->cur_transfer->speed_hz;
+	else
+		max_speed = dd->spi->max_speed_hz;
+	if (!dd->clock_speed || max_speed != dd->clock_speed)
+		msm_spi_clock_set(dd, max_speed);
+
+	/* generous bound: 100x the theoretical time for cur_msg_len bits */
+	timeout = 100 * msecs_to_jiffies(
+			DIV_ROUND_UP(dd->cur_msg_len * 8,
+			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+
+	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
+
+	ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (ret < 0) {
+		dev_err(dd->dev,
+			"%s: Error setting QUP to reset-state\n",
+			__func__);
+		return ret;
+	}
+
+	msm_spi_set_transfer_mode(dd, bpw, read_count);
+	msm_spi_set_mx_counts(dd, read_count);
+	if (dd->tx_mode == SPI_BAM_MODE) {
+		ret = msm_spi_dma_map_buffers(dd);
+		if (ret < 0) {
+			pr_err("%s(): Error Mapping DMA buffers\n", __func__);
+			dd->tx_mode = SPI_MODE_NONE;
+			dd->rx_mode = SPI_MODE_NONE;
+			return ret;
+		}
+	}
+	msm_spi_set_qup_io_modes(dd);
+	msm_spi_set_spi_config(dd, bpw);
+	msm_spi_set_qup_config(dd, bpw);
+	spi_ioc = msm_spi_set_spi_io_control(dd);
+	msm_spi_set_qup_op_mask(dd);
+
+	/* The output fifo interrupt handler will handle all writes after
+	 * the first. Restricting this to one write avoids contention
+	 * issues and race conditions between this thread and the int handler
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (msm_spi_prepare_for_write(dd))
+			goto transfer_end;
+		msm_spi_start_write(dd, read_count);
+	} else {
+		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
+			status = -EIO;
+			goto transfer_end;
+		}
+	}
+
+	/*
+	 * On BAM mode, current state here is run.
+	 * Only enter the RUN state after the first word is written into
+	 * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
+	 * might fire before the first word is written resulting in a
+	 * possible race condition.
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE)
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
+			dev_warn(dd->dev,
+				"%s: Failed to set QUP to run-state. Mode:%d\n",
+				__func__, dd->tx_mode);
+			goto transfer_end;
+		}
+
+	/* Assume success, this might change later upon transaction result */
+	do {
+		if (dd->write_buf &&
+		    !wait_for_completion_timeout(&dd->tx_transfer_complete,
+		    timeout)) {
+			dev_err(dd->dev, "%s: SPI Tx transaction timeout\n",
+				__func__);
+			status = -EIO;
+			break;
+		}
+
+		if (dd->read_buf &&
+		    !wait_for_completion_timeout(&dd->rx_transfer_complete,
+		    timeout)) {
+			dev_err(dd->dev, "%s: SPI Rx transaction timeout\n",
+				__func__);
+			status = -EIO;
+			break;
+		}
+	} while (msm_spi_dma_send_next(dd));
+
+	msm_spi_udelay(dd->xfrs_delay_usec);
+
+transfer_end:
+	if ((dd->tx_mode == SPI_BAM_MODE) && status)
+		msm_spi_bam_flush(dd);
+	msm_spi_dma_unmap_buffers(dd);
+	dd->tx_mode = SPI_MODE_NONE;
+	dd->rx_mode = SPI_MODE_NONE;
+
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (!dd->cur_transfer->cs_change)
+		writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
+		       dd->base + SPI_IO_CONTROL);
+	return status;
+}
+
+
+/**
+ * msm_spi_set_cs: spi_master set_cs callback; drives the FORCE_CS bit in
+ * SPI_IO_CONTROL to manually assert/de-assert the chip-select line.
+ * @spi: target SPI slave
+ * @set_flag: requested CS state (inverted unless SPI_CS_HIGH is set)
+ *
+ * Fix: the original leaked the runtime-PM reference when
+ * get_local_resources() failed, and leaked both the PM reference and the
+ * shared-core local resources on the suspended early return. All exit
+ * paths now balance their acquisitions.
+ */
+static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
+{
+	struct msm_spi *dd = spi_master_get_devdata(spi->master);
+	u32 spi_ioc;
+	u32 spi_ioc_orig;
+	int rc = 0;
+
+	rc = pm_runtime_get_sync(dd->dev);
+	if (rc < 0) {
+		dev_err(dd->dev, "Failure during runtime get,rc=%d\n", rc);
+		return;
+	}
+
+	if (dd->pdata->is_shared) {
+		rc = get_local_resources(dd);
+		if (rc)
+			/* drop the runtime-PM reference taken above */
+			goto put_runtime;
+	}
+
+	msm_spi_clk_path_vote(dd, spi->max_speed_hz);
+
+	if (!(spi->mode & SPI_CS_HIGH))
+		set_flag = !set_flag;
+
+	/* Serve only under mutex lock as RT suspend may cause a race */
+	mutex_lock(&dd->core_lock);
+	if (dd->suspended) {
+		dev_err(dd->dev, "%s: SPI operational state=%d Invalid\n",
+			__func__, dd->suspended);
+		/* still release resources and the PM reference */
+		goto unlock;
+	}
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc_orig = spi_ioc;
+	if (set_flag)
+		spi_ioc |= SPI_IO_C_FORCE_CS;
+	else
+		spi_ioc &= ~SPI_IO_C_FORCE_CS;
+
+	if (spi_ioc != spi_ioc_orig)
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+unlock:
+	if (dd->pdata->is_shared)
+		put_local_resources(dd);
+	mutex_unlock(&dd->core_lock);
+put_runtime:
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+}
+
+/*
+ * reset_core: re-initialize the QUP registers (used on shared cores
+ * after another EE may have touched the hardware) and put the state
+ * machine back into RESET with tri-state disabled.
+ */
+static void reset_core(struct msm_spi *dd)
+{
+	u32 spi_ioc;
+
+	msm_spi_register_init(dd);
+	/*
+	 * The SPI core generates a bogus input overrun error on some targets,
+	 * when a transition from run to reset state occurs and if the FIFO has
+	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
+	 * bit.
+	 */
+	msm_spi_enable_error_flags(dd);
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc |= SPI_IO_C_NO_TRI_STATE;
+	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+	/*
+	 * Ensure that the IO control is written to before returning.
+	 */
+	mb();
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+}
+
+/*
+ * put_local_resources: counterpart of get_local_resources(); masks the
+ * QUP IRQs, gates both clocks and releases the SPI GPIOs so another EE
+ * can use the shared core. clock_speed is cleared so the next transfer
+ * re-programs the clock rate.
+ */
+static void put_local_resources(struct msm_spi *dd)
+{
+	msm_spi_disable_irqs(dd);
+	clk_disable_unprepare(dd->clk);
+	dd->clock_speed = 0;
+	clk_disable_unprepare(dd->pclk);
+
+	/* Free  the spi clk, miso, mosi, cs gpio */
+	if (dd->pdata && dd->pdata->gpio_release)
+		dd->pdata->gpio_release();
+
+	msm_spi_free_gpios(dd);
+}
+
+/*
+ * get_local_resources: acquire everything needed to drive the core —
+ * GPIO configuration, both clocks and the QUP IRQs. Used per transfer
+ * on shared (multi-EE) cores. Returns 0 or a negative errno; on failure
+ * everything acquired so far is released via the goto-cleanup chain.
+ */
+static int get_local_resources(struct msm_spi *dd)
+{
+	int ret = -EINVAL;
+
+	/* Configure the spi clk, miso, mosi and cs gpio */
+	if (dd->pdata->gpio_config) {
+		ret = dd->pdata->gpio_config();
+		if (ret) {
+			dev_err(dd->dev,
+					"%s: error configuring GPIOs\n",
+					__func__);
+			return ret;
+		}
+	}
+
+	ret = msm_spi_request_gpios(dd);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(dd->clk);
+	if (ret)
+		goto clk0_err;
+	ret = clk_prepare_enable(dd->pclk);
+	if (ret)
+		goto clk1_err;
+	msm_spi_enable_irqs(dd);
+
+	return 0;
+
+clk1_err:
+	clk_disable_unprepare(dd->clk);
+clk0_err:
+	msm_spi_free_gpios(dd);
+	return ret;
+}
+
+/**
+ * msm_spi_transfer_one: To process one spi transfer at a time
+ * @master: spi master controller reference
+ * @spi: slave device this transfer addresses
+ * @xfer: the single transfer segment to execute
+ * @return zero on success or negative error value
+ *
+ */
+static int msm_spi_transfer_one(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
+{
+	struct msm_spi	*dd;
+	unsigned long        flags;
+	u32	status_error = 0;
+
+	dd = spi_master_get_devdata(master);
+
+	/* Check message parameters */
+	if (xfer->speed_hz > dd->pdata->max_clock_speed ||
+	    (xfer->bits_per_word &&
+	     (xfer->bits_per_word < 4 || xfer->bits_per_word > 32)) ||
+	    (xfer->tx_buf == NULL && xfer->rx_buf == NULL)) {
+		dev_err(dd->dev,
+			"Invalid transfer: %d Hz, %d bpw tx=%pK, rx=%pK\n",
+			xfer->speed_hz, xfer->bits_per_word,
+			xfer->tx_buf, xfer->rx_buf);
+		return -EINVAL;
+	}
+	dd->spi = spi;
+	dd->cur_transfer = xfer;
+
+	mutex_lock(&dd->core_lock);
+
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->transfer_pending = true;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+	/*
+	 * get local resources for each transfer to ensure we're in a good
+	 * state and not interfering with other EE's using this device
+	 */
+	if (dd->pdata->is_shared) {
+		if (get_local_resources(dd)) {
+			mutex_unlock(&dd->core_lock);
+			/*
+			 * NOTE(review): transfer_pending stays true on this
+			 * early exit — confirm this is intentional.
+			 */
+			spi_finalize_current_message(master);
+			return -EINVAL;
+		}
+
+		reset_core(dd);
+		if (dd->use_dma) {
+			msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
+					&dd->bam.prod.config);
+			msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
+					&dd->bam.cons.config);
+		}
+	}
+
+	if (dd->suspended || !msm_spi_is_valid_state(dd)) {
+		dev_err(dd->dev, "%s: SPI operational state not valid\n",
+			__func__);
+		status_error = 1;
+	}
+
+
+	if (!status_error)
+		status_error =
+			msm_spi_process_transfer(dd);
+
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->transfer_pending = false;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	/*
+	 * Put local resources prior to calling finalize to ensure the hw
+	 * is in a known state before notifying the calling thread (which is a
+	 * different context since we're running in the spi kthread here) to
+	 * prevent race conditions between us and any other EE's using this hw.
+	 */
+	if (dd->pdata->is_shared) {
+		if (dd->use_dma) {
+			msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
+			msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
+		}
+		put_local_resources(dd);
+	}
+	mutex_unlock(&dd->core_lock);
+	if (dd->suspended)
+		wake_up_interruptible(&dd->continue_suspend);
+	return status_error;
+}
+
+/*
+ * msm_spi_prepare_transfer_hardware: spi_master callback run before a
+ * message. Takes a runtime-PM reference (or resumes manually when
+ * runtime PM is disabled) and verifies the controller is not suspended.
+ * On failure the current message is finalized and the error returned.
+ */
+static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
+{
+	struct msm_spi	*dd = spi_master_get_devdata(master);
+	int resume_state = 0;
+
+	resume_state = pm_runtime_get_sync(dd->dev);
+	if (resume_state < 0)
+		goto spi_finalize;
+
+	/*
+	 * Counter-part of system-suspend when runtime-pm is not enabled.
+	 * This way, resume can be left empty and device will be put in
+	 * active mode only if client requests anything on the bus
+	 */
+	if (!pm_runtime_enabled(dd->dev))
+		resume_state = msm_spi_pm_resume_runtime(dd->dev);
+	if (resume_state < 0)
+		goto spi_finalize;
+	if (dd->suspended) {
+		resume_state = -EBUSY;
+		goto spi_finalize;
+	}
+	return 0;
+
+spi_finalize:
+	spi_finalize_current_message(master);
+	return resume_state;
+}
+
+/*
+ * spi_master callback run after a message: drop the runtime-PM
+ * reference taken in msm_spi_prepare_transfer_hardware().
+ */
+static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+	struct msm_spi *dd = spi_master_get_devdata(master);
+	struct device *dev = dd->dev;
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+	return 0;
+}
+
+/*
+ * msm_spi_setup: spi_master setup callback. Validates the per-device
+ * word size and chip-select, then programs the CS polarity, clock
+ * polarity and loopback/input-first bits for this slave.
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_spi_setup(struct spi_device *spi)
+{
+	struct msm_spi	*dd;
+	int              rc = 0;
+	u32              spi_ioc;
+	u32              spi_config;
+	u32              mask;
+
+	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
+			__func__, spi->bits_per_word);
+		return -EINVAL;
+	}
+	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
+		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
+			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
+		return -EINVAL;
+	}
+
+	dd = spi_master_get_devdata(spi->master);
+
+	/*
+	 * A resume failure before init has completed is tolerated (rc is
+	 * forced to 0) so early setup calls do not fail the whole probe.
+	 */
+	rc = pm_runtime_get_sync(dd->dev);
+	if (rc < 0 && !dd->is_init_complete &&
+			pm_runtime_enabled(dd->dev)) {
+		pm_runtime_set_suspended(dd->dev);
+		pm_runtime_put_sync(dd->dev);
+		rc = 0;
+		goto err_setup_exit;
+	} else
+		rc = 0;
+
+	mutex_lock(&dd->core_lock);
+
+	/* Counter-part of system-suspend when runtime-pm is not enabled. */
+	if (!pm_runtime_enabled(dd->dev)) {
+		rc = msm_spi_pm_resume_runtime(dd->dev);
+		if (rc < 0 && !dd->is_init_complete) {
+			rc = 0;
+			mutex_unlock(&dd->core_lock);
+			goto err_setup_exit;
+		}
+	}
+
+	if (dd->suspended) {
+		rc = -EBUSY;
+		mutex_unlock(&dd->core_lock);
+		goto err_setup_exit;
+	}
+
+	if (dd->pdata->is_shared) {
+		rc = get_local_resources(dd);
+		if (rc)
+			goto no_resources;
+	}
+
+	/* program this slave's chip-select active level (polarity) */
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
+	if (spi->mode & SPI_CS_HIGH)
+		spi_ioc |= mask;
+	else
+		spi_ioc &= ~mask;
+	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
+
+	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+							spi_config, spi->mode);
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+
+	/* Ensure previous write completed before disabling the clocks */
+	mb();
+	if (dd->pdata->is_shared)
+		put_local_resources(dd);
+	/* Counter-part of system-resume when runtime-pm is not enabled. */
+	if (!pm_runtime_enabled(dd->dev))
+		msm_spi_pm_suspend_runtime(dd->dev);
+
+no_resources:
+	mutex_unlock(&dd->core_lock);
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+
+err_setup_exit:
+	return rc;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs write hook: store @val into the SPI register described by @data. */
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	struct msm_spi_debugfs_data *reg = data;
+	struct msm_spi *dd = reg->dd;
+	int ret = pm_runtime_get_sync(dd->dev);
+
+	if (ret < 0)
+		return ret;
+
+	writel_relaxed(val, dd->base + reg->offset);
+	mb(); /* make sure the register write has completed */
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+	return 0;
+}
+
+/* debugfs read hook: fetch the SPI register described by @data into @val. */
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	struct msm_spi_debugfs_data *reg = data;
+	struct msm_spi *dd = reg->dd;
+	int ret = pm_runtime_get_sync(dd->dev);
+
+	if (ret < 0)
+		return ret;
+
+	*val = readl_relaxed(dd->base + reg->offset);
+	mb(); /* make sure the register read has completed */
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+	return 0;
+}
+
+/* File operations exposing one 32-bit SPI register as hex in debugfs. */
+DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx\n");
+
+/*
+ * spi_debugfs_init: create a "<dev>_dbg/" debugfs directory with one
+ * file per entry of debugfs_spi_regs, each exposing a controller
+ * register via fops_iomem_x32.
+ * NOTE(review): a failed debugfs_create_dir() is silently skipped; on
+ * newer kernels it returns ERR_PTR rather than NULL — confirm for this
+ * kernel tree.
+ */
+static void spi_debugfs_init(struct msm_spi *dd)
+{
+	char dir_name[20];
+
+	scnprintf(dir_name, sizeof(dir_name), "%s_dbg", dev_name(dd->dev));
+	dd->dent_spi = debugfs_create_dir(dir_name, NULL);
+	if (dd->dent_spi) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
+			dd->reg_data[i].offset = debugfs_spi_regs[i].offset;
+			dd->reg_data[i].dd = dd;
+			dd->debugfs_spi_regs[i] =
+			   debugfs_create_file(
+			       debugfs_spi_regs[i].name,
+			       debugfs_spi_regs[i].mode,
+			       dd->dent_spi, &dd->reg_data[i],
+			       &fops_iomem_x32);
+		}
+	}
+}
+
+/* Tear down the debugfs directory created by spi_debugfs_init(). */
+static void spi_debugfs_exit(struct msm_spi *dd)
+{
+	int i;
+
+	if (!dd->dent_spi)
+		return;
+
+	debugfs_remove_recursive(dd->dent_spi);
+	dd->dent_spi = NULL;
+	for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
+		dd->debugfs_spi_regs[i] = NULL;
+}
+#else
+/* Debugfs disabled: provide no-op stubs so callers need no #ifdefs. */
+static void spi_debugfs_init(struct msm_spi *dd) {}
+static void spi_debugfs_exit(struct msm_spi *dd) {}
+#endif
+
+/* ===Device attributes begin=== */
+/*
+ * sysfs "stats" read handler: dump the controller's static FIFO/DMA
+ * configuration and the Rx/Tx interrupt counters. Note the device's
+ * drvdata is the spi_master; the msm_spi context hangs off it.
+ */
+static ssize_t stats_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct msm_spi *dd =  spi_master_get_devdata(master);
+
+	return scnprintf(buf, PAGE_SIZE,
+			"Device       %s\n"
+			"rx fifo_size = %d spi words\n"
+			"tx fifo_size = %d spi words\n"
+			"use_dma ?    %s\n"
+			"rx block size = %d bytes\n"
+			"tx block size = %d bytes\n"
+			"input burst size = %d bytes\n"
+			"output burst size = %d bytes\n"
+			"DMA configuration:\n"
+			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
+			"--statistics--\n"
+			"Rx isrs  = %d\n"
+			"Tx isrs  = %d\n"
+			"--debug--\n"
+			"NA yet\n",
+			dev_name(dev),
+			dd->input_fifo_size,
+			dd->output_fifo_size,
+			dd->use_dma ? "yes" : "no",
+			dd->input_block_size,
+			dd->output_block_size,
+			dd->input_burst_size,
+			dd->output_burst_size,
+			dd->tx_dma_chan,
+			dd->rx_dma_chan,
+			dd->tx_dma_crci,
+			dd->rx_dma_crci,
+			dd->stat_rx,
+			dd->stat_tx
+			);
+}
+
+/* Reset statistics on write */
+/*
+ * Fix: the device's drvdata holds the spi_master (exactly as stats_show
+ * reads it), so dereferencing dev_get_drvdata() directly as a msm_spi
+ * pointer corrupted the wrong structure; fetch the driver context
+ * through spi_master_get_devdata() instead.
+ */
+static ssize_t stats_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct msm_spi *dd = spi_master_get_devdata(master);
+
+	dd->stat_rx = 0;
+	dd->stat_tx = 0;
+	return count;
+}
+
+/* "stats" sysfs attribute, read/write via stats_show()/stats_store() */
+static DEVICE_ATTR_RW(stats);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_stats.attr,
+	NULL,
+};
+
+/* Attribute group registered on the controller device */
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+/* ===Device attributes end=== */
+
+/*
+ * msm_spi_bam_pipe_teardown: disconnect one BAM pipe and free its
+ * descriptor FIFO, if that pipe was ever brought up.
+ * NOTE(review): SPI_BAM_CONSUMER_PIPE maps to dd->bam.prod here (and in
+ * msm_spi_bam_pipe_init) — the naming looks inverted but is consistent
+ * between init and teardown; confirm against the SPS pipe wiring.
+ */
+static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	if (!pipe->teardown_required)
+		return;
+
+	msm_spi_bam_pipe_disconnect(dd, pipe);
+	dma_free_coherent(dd->dev, pipe->config.desc.size,
+		pipe->config.desc.base, pipe->config.desc.phys_base);
+	sps_free_endpoint(pipe->handle);
+	pipe->handle = NULL;
+	pipe->teardown_required = false;
+}
+
+/*
+ * msm_spi_bam_pipe_init: allocate an SPS endpoint for one pipe
+ * direction, fill in its sps_connect configuration and allocate the
+ * descriptor FIFO. The pipe is connected later (per transfer on shared
+ * cores). Returns 0, or a negative errno with the endpoint freed.
+ *
+ * Fix: dropped the explicit memset of the descriptor FIFO —
+ * dma_zalloc_coherent() already returns zeroed memory.
+ */
+static int msm_spi_bam_pipe_init(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	struct sps_connect *pipe_conf = &pipe->config;
+
+	pipe->name   = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? "cons" : "prod";
+	pipe->handle = NULL;
+	pipe_handle  = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
+								, __func__);
+		return -ENOMEM;
+	}
+
+	memset(pipe_conf, 0, sizeof(*pipe_conf));
+	rc = sps_get_config(pipe_handle, pipe_conf);
+	if (rc) {
+		dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
+			, __func__);
+		goto config_err;
+	}
+
+	if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
+		pipe_conf->source          = dd->bam.handle;
+		pipe_conf->destination     = SPS_DEV_HANDLE_MEM;
+		pipe_conf->mode            = SPS_MODE_SRC;
+		pipe_conf->src_pipe_index  =
+					dd->pdata->bam_producer_pipe_index;
+		pipe_conf->dest_pipe_index = 0;
+	} else {
+		pipe_conf->source          = SPS_DEV_HANDLE_MEM;
+		pipe_conf->destination     = dd->bam.handle;
+		pipe_conf->mode            = SPS_MODE_DEST;
+		pipe_conf->src_pipe_index  = 0;
+		pipe_conf->dest_pipe_index =
+					dd->pdata->bam_consumer_pipe_index;
+	}
+	pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
+	pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
+	/* descriptor FIFO comes back zeroed, convenient for debugging the
+	 * first descriptors
+	 */
+	pipe_conf->desc.base = dma_zalloc_coherent(dd->dev,
+				pipe_conf->desc.size,
+				&pipe_conf->desc.phys_base,
+				GFP_KERNEL);
+	if (!pipe_conf->desc.base) {
+		dev_err(dd->dev, "%s: Failed allocate BAM pipe memory\n"
+			, __func__);
+		rc = -ENOMEM;
+		goto config_err;
+	}
+
+	pipe->handle = pipe_handle;
+
+	return 0;
+
+config_err:
+	sps_free_endpoint(pipe_handle);
+
+	return rc;
+}
+
+/* Release both BAM pipes and, if this driver registered the BAM device,
+ * deregister it again.
+ */
+static void msm_spi_bam_teardown(struct msm_spi *dd)
+{
+	msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
+	msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
+
+	if (!dd->bam.deregister_required)
+		return;
+
+	sps_deregister_bam_device(dd->bam.handle);
+	dd->bam.deregister_required = false;
+}
+
+/*
+ * msm_spi_bam_init: locate the BAM device for this core (registering it
+ * ourselves only when sps_phy2h() finds no existing handle) and bring up
+ * both pipes. Everything is torn down again if a pipe fails.
+ */
+static int msm_spi_bam_init(struct msm_spi *dd)
+{
+	struct sps_bam_props bam_props = {0};
+	uintptr_t bam_handle;
+	int rc = 0;
+
+	rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
+	if (rc || !bam_handle) {
+		bam_props.phys_addr = dd->bam.phys_addr;
+		bam_props.virt_addr = dd->bam.base;
+		bam_props.irq       = dd->bam.irq;
+		bam_props.manage    = SPS_BAM_MGR_DEVICE_REMOTE;
+		bam_props.summing_threshold = 0x10;
+
+		rc = sps_register_bam_device(&bam_props, &bam_handle);
+		if (rc) {
+			dev_err(dd->dev,
+				"%s: Failed to register BAM device\n",
+				__func__);
+			return rc;
+		}
+		/* remember to deregister on teardown since we registered */
+		dd->bam.deregister_required = true;
+	}
+
+	dd->bam.handle = bam_handle;
+
+	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
+	if (rc) {
+		dev_err(dd->dev,
+			"%s: Failed to init producer BAM-pipe\n",
+			__func__);
+		goto bam_init_error;
+	}
+
+	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
+	if (rc) {
+		dev_err(dd->dev,
+			"%s: Failed to init consumer BAM-pipe\n",
+			__func__);
+		goto bam_init_error;
+	}
+
+	return 0;
+
+bam_init_error:
+	msm_spi_bam_teardown(dd);
+	return rc;
+}
+
+/* Policy applied when a device-tree entry is absent. */
+enum msm_spi_dt_entry_status {
+	DT_REQ,  /* Required:  fail if missing */
+	DT_SGST, /* Suggested: warn if missing */
+	DT_OPT,  /* Optional:  don't warn if missing */
+};
+
+/* Data type of a device-tree entry; selects the parse routine. */
+enum msm_spi_dt_entry_type {
+	DT_U32,
+	DT_GPIO,
+	DT_BOOL,
+};
+
+/* One row of the DT-property-to-pdata-field mapping table. */
+struct msm_spi_dt_to_pdata_map {
+	const char                  *dt_name;     /* property name in DT */
+	void                        *ptr_data;    /* destination field */
+	enum msm_spi_dt_entry_status status;      /* missing-entry policy */
+	enum msm_spi_dt_entry_type   type;        /* how to parse it */
+	int                          default_val; /* stored when absent */
+};
+
+/*
+ * msm_spi_dt_to_pdata_populate: walk a msm_spi_dt_to_pdata_map table and
+ * read each device-tree property into its destination field.
+ *
+ * A missing/unreadable entry gets its default value instead; only DT_REQ
+ * entries turn that into the returned error. Parsing deliberately
+ * continues past failures so every missing entry gets logged in one
+ * pass. Returns 0 or the first required-entry error code.
+ */
+static int msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
+					struct msm_spi_platform_data *pdata,
+					struct msm_spi_dt_to_pdata_map  *itr)
+{
+	int  ret, err = 0;
+	struct device_node *node = pdev->dev.of_node;
+
+	for (; itr->dt_name; ++itr) {
+		switch (itr->type) {
+		case DT_GPIO:
+			ret = of_get_named_gpio(node, itr->dt_name, 0);
+			if (ret >= 0) {
+				*((int *) itr->ptr_data) = ret;
+				ret = 0;
+			}
+			break;
+		case DT_U32:
+			ret = of_property_read_u32(node, itr->dt_name,
+							 (u32 *) itr->ptr_data);
+			break;
+		case DT_BOOL:
+			*((bool *) itr->ptr_data) =
+				of_property_read_bool(node, itr->dt_name);
+			ret = 0;
+			break;
+		default:
+			dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
+								itr->type);
+			ret = -EBADE;
+		}
+
+		dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
+				ret, itr->dt_name, *((int *)itr->ptr_data));
+
+		if (ret) {
+			*((int *)itr->ptr_data) = itr->default_val;
+
+			if (itr->status < DT_OPT) {
+				dev_err(&pdev->dev, "Missing '%s' DT entry\n",
+								itr->dt_name);
+
+				/* cont on err to dump all missing entries */
+				if (itr->status == DT_REQ && !err)
+					err = ret;
+			}
+		}
+	}
+
+	return err;
+}
+
+/**
+ * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree
+ * @pdev: owning platform device; its of_node supplies the properties
+ * @dd: driver context whose GPIO tables are filled in
+ *
+ * Returns devm-allocated platform data, or NULL when allocation or a
+ * required DT entry fails (devm-managed, nothing to free).
+ *
+ * Fix: removed the redundant `if (pdata)` re-check that immediately
+ * followed `if (!pdata) return NULL;`.
+ */
+static struct msm_spi_platform_data *msm_spi_dt_to_pdata(
+			struct platform_device *pdev, struct msm_spi *dd)
+{
+	struct msm_spi_platform_data *pdata;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	/* scoped so the map initializers can reference the allocated pdata */
+	{
+		struct msm_spi_dt_to_pdata_map map[] = {
+		{"spi-max-frequency",
+			&pdata->max_clock_speed,         DT_SGST, DT_U32,   0},
+		{"qcom,infinite-mode",
+			&pdata->infinite_mode,           DT_OPT,  DT_U32,   0},
+		{"qcom,master-id",
+			&pdata->master_id,               DT_SGST, DT_U32,   0},
+		{"qcom,bus-width",
+			&pdata->bus_width,               DT_OPT, DT_U32,   8},
+		{"qcom,ver-reg-exists",
+			&pdata->ver_reg_exists,          DT_OPT,  DT_BOOL,  0},
+		{"qcom,use-bam",
+			&pdata->use_bam,                 DT_OPT,  DT_BOOL,  0},
+		{"qcom,use-pinctrl",
+			&pdata->use_pinctrl,             DT_OPT,  DT_BOOL,  0},
+		{"qcom,bam-consumer-pipe-index",
+			&pdata->bam_consumer_pipe_index, DT_OPT,  DT_U32,   0},
+		{"qcom,bam-producer-pipe-index",
+			&pdata->bam_producer_pipe_index, DT_OPT,  DT_U32,   0},
+		{"qcom,gpio-clk",
+			&dd->spi_gpios[0],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-miso",
+			&dd->spi_gpios[1],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-mosi",
+			&dd->spi_gpios[2],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs0",
+			&dd->cs_gpios[0].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs1",
+			&dd->cs_gpios[1].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs2",
+			&dd->cs_gpios[2].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs3",
+			&dd->cs_gpios[3].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,rt-priority",
+			&pdata->rt_priority,		 DT_OPT,  DT_BOOL,  0},
+		{"qcom,shared",
+			&pdata->is_shared,		 DT_OPT,  DT_BOOL,  0},
+		{NULL,  NULL,                            0,       0,        0},
+		};
+
+		if (msm_spi_dt_to_pdata_populate(pdev, pdata, map))
+			return NULL;
+	}
+
+	/* BAM needs both pipe indices; otherwise fall back to FIFO mode */
+	if (pdata->use_bam) {
+		if (!pdata->bam_consumer_pipe_index) {
+			dev_warn(&pdev->dev,
+			"missing qcom,bam-consumer-pipe-index entry in device-tree\n");
+			pdata->use_bam = false;
+		}
+
+		if (!pdata->bam_producer_pipe_index) {
+			dev_warn(&pdev->dev,
+			"missing qcom,bam-producer-pipe-index entry in device-tree\n");
+			pdata->use_bam = false;
+		}
+	}
+	return pdata;
+}
+
+/*
+ * Read the QUP hardware version register and classify the controller:
+ * version >= 2.1.1 means a B-family (BAM-capable) QUP. @dev is unused
+ * but kept for signature compatibility with callers.
+ */
+static int msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
+{
+	u32 ver = readl_relaxed(dd->base + QUP_HARDWARE_VER);
+
+	if (ver >= QUP_HARDWARE_VER_2_1_1)
+		return SPI_QUP_VERSION_BFAM;
+	return SPI_QUP_VERSION_NONE;
+}
+
+/*
+ * msm_spi_bam_get_resources: parse the BAM register space and IRQ from
+ * DT, map the registers and install the BAM init/teardown hooks.
+ * Returns 0, or a negative errno (callers then run without DMA).
+ */
+static int msm_spi_bam_get_resources(struct msm_spi *dd,
+	struct platform_device *pdev, struct spi_master *master)
+{
+	struct resource *resource;
+	size_t bam_mem_size;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"spi_bam_physical");
+	if (!resource) {
+		dev_warn(&pdev->dev,
+			"%s: Missing spi_bam_physical entry in DT\n",
+			__func__);
+		return -ENXIO;
+	}
+
+	dd->bam.phys_addr = resource->start;
+	bam_mem_size = resource_size(resource);
+	dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
+					bam_mem_size);
+	if (!dd->bam.base) {
+		dev_warn(&pdev->dev,
+			"%s: Failed to ioremap(spi_bam_physical)\n",
+			__func__);
+		return -ENXIO;
+	}
+
+	dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
+	if (dd->bam.irq < 0) {
+		dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* route the generic DMA hooks to the BAM implementation */
+	dd->dma_init = msm_spi_bam_init;
+	dd->dma_teardown = msm_spi_bam_teardown;
+	return 0;
+}
+
+/*
+ * init_resources: one-time hardware bring-up at probe time — pinctrl,
+ * clocks, GSBI configuration (A-family only), FIFO sizing, optional DMA
+ * init, register defaults and the QUP IRQ. Clocks are left disabled on
+ * return; they are re-enabled per transfer via runtime PM. Returns 0 or
+ * a negative errno, with partial setup unwound through the error labels.
+ */
+static int init_resources(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	int               rc = -ENXIO;
+	int               clk_enabled = 0;
+	int               pclk_enabled = 0;
+
+	dd = spi_master_get_devdata(master);
+
+	if (dd->pdata && dd->pdata->use_pinctrl) {
+		rc = msm_spi_pinctrl_init(dd);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: pinctrl init failed\n",
+					 __func__);
+			return rc;
+		}
+	}
+
+	mutex_lock(&dd->core_lock);
+
+	dd->clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(dd->clk)) {
+		dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
+		rc = PTR_ERR(dd->clk);
+		goto err_clk_get;
+	}
+
+	dd->pclk = clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(dd->pclk)) {
+		dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
+		rc = PTR_ERR(dd->pclk);
+		goto err_pclk_get;
+	}
+
+	if (dd->pdata && dd->pdata->max_clock_speed)
+		msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
+
+	rc = clk_prepare_enable(dd->clk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
+			__func__);
+		goto err_clk_enable;
+	}
+
+	clk_enabled = 1;
+	rc = clk_prepare_enable(dd->pclk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
+		__func__);
+		goto err_pclk_enable;
+	}
+
+	pclk_enabled = 1;
+
+	/* cross-check the HW version register against the probed assumption */
+	if (dd->pdata && dd->pdata->ver_reg_exists) {
+		enum msm_spi_qup_version ver =
+					msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+		if (dd->qup_ver != ver)
+			dev_warn(&pdev->dev,
+			"%s: HW version different then assumed by probe\n",
+			__func__);
+	}
+
+	/* GSBI does not exist on B-family MSM-chips */
+	if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
+		rc = msm_spi_configure_gsbi(dd, pdev);
+		if (rc)
+			goto err_config_gsbi;
+	}
+
+	msm_spi_calculate_fifo_size(dd);
+	if (dd->use_dma) {
+		rc = dd->dma_init(dd);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: failed to init DMA. Disabling DMA mode\n",
+				__func__);
+			dd->use_dma = false;
+		}
+	}
+
+	msm_spi_register_init(dd);
+	/*
+	 * The SPI core generates a bogus input overrun error on some targets,
+	 * when a transition from run to reset state occurs and if the FIFO has
+	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
+	 * bit.
+	 */
+	msm_spi_enable_error_flags(dd);
+
+	writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
+	rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (rc)
+		goto err_spi_state;
+
+	/* leave the clocks off; runtime PM re-enables them per transfer */
+	clk_disable_unprepare(dd->clk);
+	clk_disable_unprepare(dd->pclk);
+	clk_enabled = 0;
+	pclk_enabled = 0;
+
+	dd->transfer_pending = false;
+	dd->tx_mode = SPI_MODE_NONE;
+	dd->rx_mode = SPI_MODE_NONE;
+
+	rc = msm_spi_request_irq(dd, pdev, master);
+	if (rc)
+		goto err_irq;
+
+	msm_spi_disable_irqs(dd);
+
+	mutex_unlock(&dd->core_lock);
+	return 0;
+
+err_irq:
+err_spi_state:
+	if (dd->use_dma && dd->dma_teardown)
+		dd->dma_teardown(dd);
+err_config_gsbi:
+	if (pclk_enabled)
+		clk_disable_unprepare(dd->pclk);
+err_pclk_enable:
+	if (clk_enabled)
+		clk_disable_unprepare(dd->clk);
+err_clk_enable:
+	clk_put(dd->pclk);
+err_pclk_get:
+	clk_put(dd->clk);
+err_clk_get:
+	mutex_unlock(&dd->core_lock);
+	return rc;
+}
+
+/*
+ * msm_spi_probe - bind the MSM SPI (QUP) controller.
+ *
+ * Allocates and configures the spi_master, parses either DT properties or
+ * legacy board-file platform data, maps the register region, enables
+ * runtime PM (autosuspend) and registers the master.  Clock and IRQ setup
+ * is deferred to init_resources(), invoked on the first runtime resume
+ * (see msm_spi_pm_resume_runtime()).
+ *
+ * Fix vs. original: the DT-path pdata allocation failure used
+ * "goto err_probe_exit", which skips spi_master_put() and leaks the
+ * spi_master allocated above; it now unwinds through err_probe_res.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master      *master;
+	struct msm_spi	       *dd;
+	struct resource	       *resource;
+	int			i = 0;
+	int                     rc = -ENXIO;
+	struct msm_spi_platform_data *pdata;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
+	if (!master) {
+		rc = -ENOMEM;
+		dev_err(&pdev->dev, "master allocation failed\n");
+		goto err_probe_exit;
+	}
+
+	master->bus_num        = pdev->id;
+	master->mode_bits      = SPI_SUPPORTED_MODES;
+	master->num_chipselect = SPI_NUM_CHIPSELECTS;
+	master->set_cs	       = msm_spi_set_cs;
+	master->setup          = msm_spi_setup;
+	master->prepare_transfer_hardware = msm_spi_prepare_transfer_hardware;
+	master->transfer_one = msm_spi_transfer_one;
+	master->unprepare_transfer_hardware
+			= msm_spi_unprepare_transfer_hardware;
+
+	platform_set_drvdata(pdev, master);
+	dd = spi_master_get_devdata(master);
+
+	if (pdev->dev.of_node) {
+		/* DT probe: B-family QUP; pdata comes from device tree */
+		dd->qup_ver = SPI_QUP_VERSION_BFAM;
+		master->dev.of_node = pdev->dev.of_node;
+		pdata = msm_spi_dt_to_pdata(pdev, dd);
+		if (!pdata) {
+			dev_err(&pdev->dev, "platform data allocation failed\n");
+			rc = -ENOMEM;
+			/* master already allocated: must spi_master_put() it */
+			goto err_probe_res;
+		}
+
+		rc = of_alias_get_id(pdev->dev.of_node, "spi");
+		if (rc < 0)
+			dev_warn(&pdev->dev,
+				"using default bus_num %d\n", pdev->id);
+		else
+			master->bus_num = pdev->id = rc;
+	} else {
+		/* Legacy probe: GPIO numbers come from IORESOURCE_IO entries */
+		pdata = pdev->dev.platform_data;
+		dd->qup_ver = SPI_QUP_VERSION_NONE;
+
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			resource = platform_get_resource(pdev, IORESOURCE_IO,
+							i);
+			dd->spi_gpios[i] = resource ? resource->start : -1;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
+			resource = platform_get_resource(pdev, IORESOURCE_IO,
+						i + ARRAY_SIZE(spi_rsrcs));
+			dd->cs_gpios[i].gpio_num = resource ?
+							resource->start : -1;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
+		dd->cs_gpios[i].valid = false;
+
+	dd->pdata = pdata;
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		rc = -ENXIO;
+		goto err_probe_res;
+	}
+
+	dd->mem_phys_addr = resource->start;
+	dd->mem_size = resource_size(resource);
+	dd->dev = &pdev->dev;
+
+	if (pdata) {
+		master->rt = pdata->rt_priority;
+		if (pdata->dma_config) {
+			rc = pdata->dma_config();
+			if (rc) {
+				/* DMA is optional: fall back to PIO mode */
+				dev_warn(&pdev->dev,
+					"%s: DM mode not supported\n",
+					__func__);
+				dd->use_dma = false;
+				goto skip_dma_resources;
+			}
+		}
+		if (!dd->pdata->use_bam)
+			goto skip_dma_resources;
+
+		rc = msm_spi_bam_get_resources(dd, pdev, master);
+		if (rc) {
+			dev_warn(dd->dev,
+					"%s: Failed to get BAM resources\n",
+					__func__);
+			goto skip_dma_resources;
+		}
+		dd->use_dma = true;
+	}
+
+	spi_dma_mask(&pdev->dev);
+skip_dma_resources:
+
+	spin_lock_init(&dd->queue_lock);
+	mutex_init(&dd->core_lock);
+	init_waitqueue_head(&dd->continue_suspend);
+
+	if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
+					dd->mem_size, SPI_DRV_NAME)) {
+		rc = -ENXIO;
+		goto err_probe_reqmem;
+	}
+
+	dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
+	if (!dd->base) {
+		rc = -ENOMEM;
+		goto err_probe_reqmem;
+	}
+
+	/* Runtime PM with 1 s autosuspend; resources come up lazily */
+	pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	dd->suspended = true;
+	rc = spi_register_master(master);
+	if (rc)
+		goto err_probe_reg_master;
+
+	rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
+		goto err_attrs;
+	}
+	spi_debugfs_init(dd);
+
+	return 0;
+
+err_attrs:
+	spi_unregister_master(master);
+err_probe_reg_master:
+	pm_runtime_disable(&pdev->dev);
+err_probe_reqmem:
+err_probe_res:
+	spi_master_put(master);
+err_probe_exit:
+	return rc;
+}
+
+/*
+ * msm_spi_pm_suspend_runtime - runtime-PM suspend callback.
+ *
+ * Marks the controller suspended (under queue_lock so no new transfer can
+ * slip in), waits for any in-flight transfer to drain, then releases BAM
+ * pipes and local resources — but only when the QUP is not shared
+ * (pdata->is_shared presumably means another entity also uses the QUP;
+ * TODO confirm against pdata definition) — and drops the bus bandwidth
+ * vote.  Always returns 0 so runtime PM proceeds.
+ */
+static int msm_spi_pm_suspend_runtime(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	unsigned long	   flags;
+
+	dev_dbg(device, "pm_runtime: suspending...\n");
+	if (!master)
+		goto suspend_exit;
+	dd = spi_master_get_devdata(master);
+	if (!dd)
+		goto suspend_exit;
+
+	/* Already suspended: nothing to do */
+	if (dd->suspended)
+		return 0;
+
+	/*
+	 * Make sure nothing is added to the queue while we're
+	 * suspending
+	 */
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->suspended = true;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	/* Wait for transactions to end, or time out */
+	wait_event_interruptible(dd->continue_suspend,
+		!dd->transfer_pending);
+
+	mutex_lock(&dd->core_lock);
+	if (dd->pdata && !dd->pdata->is_shared && dd->use_dma) {
+		msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
+		msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
+	}
+	if (dd->pdata && !dd->pdata->is_shared)
+		put_local_resources(dd);
+
+	if (dd->pdata)
+		msm_spi_clk_path_vote(dd, 0);	/* 0 == drop bandwidth vote */
+	mutex_unlock(&dd->core_lock);
+
+suspend_exit:
+	return 0;
+}
+
+/*
+ * msm_spi_pm_resume_runtime - runtime-PM resume callback.
+ *
+ * Performs lazy one-time hardware bring-up on the very first resume
+ * (init_resources(): clocks, GSBI/FIFO/IRQ setup), then re-votes bus
+ * bandwidth, re-acquires local resources and reconnects the BAM pipes
+ * (skipped when the QUP is shared).  Returns 0, or the error from
+ * init_resources()/get_local_resources() — in which case dd->suspended
+ * stays true.
+ */
+static int msm_spi_pm_resume_runtime(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	int               ret = 0;
+
+	dev_dbg(device, "pm_runtime: resuming...\n");
+	if (!master)
+		goto resume_exit;
+	dd = spi_master_get_devdata(master);
+	if (!dd)
+		goto resume_exit;
+
+	/* Already active: nothing to do */
+	if (!dd->suspended)
+		return 0;
+	if (!dd->is_init_complete) {
+		/* Deferred hardware init: probe leaves clocks/IRQs down */
+		ret = init_resources(pdev);
+		if (ret != 0)
+			return ret;
+
+		dd->is_init_complete = true;
+	}
+	msm_spi_clk_path_init(dd);
+	msm_spi_clk_path_vote(dd, dd->pdata->max_clock_speed);
+
+	if (!dd->pdata->is_shared) {
+		ret = get_local_resources(dd);
+		if (ret)
+			return ret;
+	}
+	if (!dd->pdata->is_shared && dd->use_dma) {
+		msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
+				&dd->bam.prod.config);
+		msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
+				&dd->bam.cons.config);
+	}
+	dd->suspended = false;
+
+resume_exit:
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * msm_spi_suspend - system (sleep) suspend callback.
+ *
+ * If the device is not already runtime-suspended, force the runtime
+ * suspend path now, then re-synchronize the runtime-PM core's view by
+ * disabling it, marking the device suspended and re-enabling it.
+ * Always returns 0.
+ */
+static int msm_spi_suspend(struct device *device)
+{
+	if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
+		struct platform_device *pdev = to_platform_device(device);
+		struct spi_master *master = platform_get_drvdata(pdev);
+		struct msm_spi   *dd;
+
+		dev_dbg(device, "system suspend\n");
+		if (!master)
+			goto suspend_exit;
+		dd = spi_master_get_devdata(master);
+		if (!dd)
+			goto suspend_exit;
+		msm_spi_pm_suspend_runtime(device);
+
+		/*
+		 * set the device's runtime PM status to 'suspended'
+		 */
+		pm_runtime_disable(device);
+		pm_runtime_set_suspended(device);
+		pm_runtime_enable(device);
+	}
+suspend_exit:
+	return 0;
+}
+
+/*
+ * msm_spi_resume - system (sleep) resume callback; intentionally a no-op.
+ */
+static int msm_spi_resume(struct device *device)
+{
+	/*
+	 * Rely on runtime-PM to call resume in case it is enabled
+	 * Even if it's not enabled, rely on 1st client transaction to do
+	 * clock ON and gpio configuration
+	 */
+	dev_dbg(device, "system resume\n");
+	return 0;
+}
+#else
+#define msm_spi_suspend NULL
+#define msm_spi_resume NULL
+#endif
+
+
+/*
+ * msm_spi_remove - unbind the controller: tear down debugfs/sysfs, DMA,
+ * runtime PM, clocks and the bus-scaling path, then unregister and drop
+ * the spi_master.  Mirrors the acquisition order of probe/init_resources
+ * in reverse.
+ */
+static int msm_spi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi    *dd = spi_master_get_devdata(master);
+
+	spi_debugfs_exit(dd);
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+
+	if (dd->dma_teardown)
+		dd->dma_teardown(dd);
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	clk_put(dd->clk);
+	clk_put(dd->pclk);
+	msm_spi_clk_path_teardown(dd);
+	platform_set_drvdata(pdev, NULL);
+	spi_unregister_master(master);
+	spi_master_put(master);
+
+	return 0;
+}
+
+static const struct of_device_id msm_spi_dt_match[] = {
+	{
+		.compatible = "qcom,spi-qup-v2",
+	},
+	{}
+};
+
+static const struct dev_pm_ops msm_spi_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
+	SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
+			msm_spi_pm_resume_runtime, NULL)
+};
+
+static struct platform_driver msm_spi_driver = {
+	.driver		= {
+		.name	= SPI_DRV_NAME,
+		.pm		= &msm_spi_dev_pm_ops,
+		.of_match_table = msm_spi_dt_match,
+	},
+	.probe		= msm_spi_probe,
+	.remove		= msm_spi_remove,
+};
+
+module_platform_driver(msm_spi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:"SPI_DRV_NAME);
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
new file mode 100644
index 0000000..0ba8abd
--- /dev/null
+++ b/drivers/spi/spi_qsd.h
@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SPI_QSD_H
+#define _SPI_QSD_H
+
+#include <linux/pinctrl/consumer.h>
+#define SPI_DRV_NAME                  "spi_qsd"
+
+#if IS_ENABLED(CONFIG_SPI_QSD) || IS_ENABLED(CONFIG_SPI_QSD_MODULE)
+
+#define QSD_REG(x) (x)
+#define QUP_REG(x)
+
+#define SPI_FIFO_WORD_CNT             0x0048
+
+#else
+
+#define QSD_REG(x)
+#define QUP_REG(x) (x)
+
+#define QUP_CONFIG                    0x0000 /* N & NO_INPUT/NO_OUTPUT bits */
+#define QUP_ERROR_FLAGS_EN            0x030C
+#define QUP_ERR_MASK                  0x3
+#define SPI_OUTPUT_FIFO_WORD_CNT      0x010C
+#define SPI_INPUT_FIFO_WORD_CNT       0x0214
+#define QUP_MX_WRITE_COUNT            0x0150
+#define QUP_MX_WRITE_CNT_CURRENT      0x0154
+
+#define QUP_CONFIG_SPI_MODE           0x0100
+#endif
+
+#define GSBI_CTRL_REG                 0x0
+#define GSBI_SPI_CONFIG               0x30
+/* B-family only registers */
+#define QUP_HARDWARE_VER              0x0030
+#define QUP_HARDWARE_VER_2_1_1        0X20010001
+#define QUP_OPERATIONAL_MASK          0x0028
+#define QUP_OP_MASK_OUTPUT_SERVICE_FLAG 0x100
+#define QUP_OP_MASK_INPUT_SERVICE_FLAG  0x200
+
+#define QUP_ERROR_FLAGS               0x0308
+
+#define SPI_CONFIG                    (QSD_REG(0x0000) QUP_REG(0x0300))
+#define SPI_IO_CONTROL                (QSD_REG(0x0004) QUP_REG(0x0304))
+#define SPI_IO_MODES                  (QSD_REG(0x0008) QUP_REG(0x0008))
+#define SPI_SW_RESET                  (QSD_REG(0x000C) QUP_REG(0x000C))
+#define SPI_TIME_OUT_CURRENT          (QSD_REG(0x0014) QUP_REG(0x0014))
+#define SPI_MX_OUTPUT_COUNT           (QSD_REG(0x0018) QUP_REG(0x0100))
+#define SPI_MX_OUTPUT_CNT_CURRENT     (QSD_REG(0x001C) QUP_REG(0x0104))
+#define SPI_MX_INPUT_COUNT            (QSD_REG(0x0020) QUP_REG(0x0200))
+#define SPI_MX_INPUT_CNT_CURRENT      (QSD_REG(0x0024) QUP_REG(0x0204))
+#define SPI_MX_READ_COUNT             (QSD_REG(0x0028) QUP_REG(0x0208))
+#define SPI_MX_READ_CNT_CURRENT       (QSD_REG(0x002C) QUP_REG(0x020C))
+#define SPI_OPERATIONAL               (QSD_REG(0x0030) QUP_REG(0x0018))
+#define SPI_ERROR_FLAGS               (QSD_REG(0x0034) QUP_REG(0x001C))
+#define SPI_ERROR_FLAGS_EN            (QSD_REG(0x0038) QUP_REG(0x0020))
+#define SPI_DEASSERT_WAIT             (QSD_REG(0x003C) QUP_REG(0x0310))
+#define SPI_OUTPUT_DEBUG              (QSD_REG(0x0040) QUP_REG(0x0108))
+#define SPI_INPUT_DEBUG               (QSD_REG(0x0044) QUP_REG(0x0210))
+#define SPI_TEST_CTRL                 (QSD_REG(0x004C) QUP_REG(0x0024))
+#define SPI_OUTPUT_FIFO               (QSD_REG(0x0100) QUP_REG(0x0110))
+#define SPI_INPUT_FIFO                (QSD_REG(0x0200) QUP_REG(0x0218))
+#define SPI_STATE                     (QSD_REG(SPI_OPERATIONAL) QUP_REG(0x0004))
+
+/* QUP_CONFIG fields */
+#define SPI_CFG_N                     0x0000001F
+#define SPI_NO_INPUT                  0x00000080
+#define SPI_NO_OUTPUT                 0x00000040
+#define SPI_EN_EXT_OUT_FLAG           0x00010000
+
+/* SPI_CONFIG fields */
+#define SPI_CFG_LOOPBACK              0x00000100
+#define SPI_CFG_INPUT_FIRST           0x00000200
+#define SPI_CFG_HS_MODE               0x00000400
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS             0x00000800
+#define SPI_IO_C_CLK_IDLE_HIGH        0x00000400
+#define SPI_IO_C_MX_CS_MODE           0x00000100
+#define SPI_IO_C_CS_N_POLARITY        0x000000F0
+#define SPI_IO_C_CS_N_POLARITY_0      0x00000010
+#define SPI_IO_C_CS_SELECT            0x0000000C
+#define SPI_IO_C_TRISTATE_CS          0x00000002
+#define SPI_IO_C_NO_TRI_STATE         0x00000001
+
+/* SPI_IO_MODES fields */
+#define SPI_IO_M_OUTPUT_BIT_SHIFT_EN  (QSD_REG(0x00004000) QUP_REG(0x00010000))
+#define SPI_IO_M_PACK_EN              (QSD_REG(0x00002000) QUP_REG(0x00008000))
+#define SPI_IO_M_UNPACK_EN            (QSD_REG(0x00001000) QUP_REG(0x00004000))
+#define SPI_IO_M_INPUT_MODE           (QSD_REG(0x00000C00) QUP_REG(0x00003000))
+#define SPI_IO_M_OUTPUT_MODE          (QSD_REG(0x00000300) QUP_REG(0x00000C00))
+#define SPI_IO_M_INPUT_FIFO_SIZE      (QSD_REG(0x000000C0) QUP_REG(0x00000380))
+#define SPI_IO_M_INPUT_BLOCK_SIZE     (QSD_REG(0x00000030) QUP_REG(0x00000060))
+#define SPI_IO_M_OUTPUT_FIFO_SIZE     (QSD_REG(0x0000000C) QUP_REG(0x0000001C))
+#define SPI_IO_M_OUTPUT_BLOCK_SIZE    (QSD_REG(0x00000003) QUP_REG(0x00000003))
+
+#define INPUT_BLOCK_SZ_SHIFT          (QSD_REG(4)          QUP_REG(5))
+#define INPUT_FIFO_SZ_SHIFT           (QSD_REG(6)          QUP_REG(7))
+#define OUTPUT_BLOCK_SZ_SHIFT         (QSD_REG(0)          QUP_REG(0))
+#define OUTPUT_FIFO_SZ_SHIFT          (QSD_REG(2)          QUP_REG(2))
+#define OUTPUT_MODE_SHIFT             (QSD_REG(8)          QUP_REG(10))
+#define INPUT_MODE_SHIFT              (QSD_REG(10)         QUP_REG(12))
+
+/* SPI_OPERATIONAL fields */
+#define SPI_OP_IN_BLK_RD_REQ_FLAG     0x00002000
+#define SPI_OP_OUT_BLK_WR_REQ_FLAG    0x00001000
+#define SPI_OP_MAX_INPUT_DONE_FLAG    0x00000800
+#define SPI_OP_MAX_OUTPUT_DONE_FLAG   0x00000400
+#define SPI_OP_INPUT_SERVICE_FLAG     0x00000200
+#define SPI_OP_OUTPUT_SERVICE_FLAG    0x00000100
+#define SPI_OP_INPUT_FIFO_FULL        0x00000080
+#define SPI_OP_OUTPUT_FIFO_FULL       0x00000040
+#define SPI_OP_IP_FIFO_NOT_EMPTY      0x00000020
+#define SPI_OP_OP_FIFO_NOT_EMPTY      0x00000010
+#define SPI_OP_STATE_VALID            0x00000004
+#define SPI_OP_STATE                  0x00000003
+
+#define SPI_OP_STATE_CLEAR_BITS       0x2
+
+#define SPI_PINCTRL_STATE_DEFAULT "spi_default"
+#define SPI_PINCTRL_STATE_SLEEP "spi_sleep"
+
+enum msm_spi_state {
+	SPI_OP_STATE_RESET = 0x00000000,
+	SPI_OP_STATE_RUN   = 0x00000001,
+	SPI_OP_STATE_PAUSE = 0x00000003,
+};
+
+/* SPI_ERROR_FLAGS fields */
+#define SPI_ERR_OUTPUT_OVER_RUN_ERR   0x00000020
+#define SPI_ERR_INPUT_UNDER_RUN_ERR   0x00000010
+#define SPI_ERR_OUTPUT_UNDER_RUN_ERR  0x00000008
+#define SPI_ERR_INPUT_OVER_RUN_ERR    0x00000004
+#define SPI_ERR_CLK_OVER_RUN_ERR      0x00000002
+#define SPI_ERR_CLK_UNDER_RUN_ERR     0x00000001
+
+/* We don't allow transactions larger than 4K-64 or 64K-64 due to
+ * mx_input/output_cnt register size
+ */
+#define SPI_MAX_TRANSFERS             (QSD_REG(0xFC0) QUP_REG(0xFC0))
+#define SPI_MAX_LEN                   (SPI_MAX_TRANSFERS * dd->bytes_per_word)
+
+#define SPI_NUM_CHIPSELECTS           4
+#define SPI_SUPPORTED_MODES  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP)
+
+/* high speed mode is when bus rate is greater then 26MHz */
+#define SPI_HS_MIN_RATE               (26000000)
+
+#define SPI_DELAY_THRESHOLD           1
+/* Default timeout is 10 milliseconds */
+#define SPI_DEFAULT_TIMEOUT           10
+/* 250 microseconds */
+#define SPI_TRYLOCK_DELAY             250
+
+/* Data Mover burst size */
+#define DM_BURST_SIZE                 16
+/* Data Mover commands should be aligned to 64 bit(8 bytes) */
+#define DM_BYTE_ALIGN                 8
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_LPAE)
+#define spi_dma_mask(dev)   (dma_set_mask((dev), DMA_BIT_MASK(36)))
+#else
+#define spi_dma_mask(dev)   (dma_set_mask((dev), DMA_BIT_MASK(32)))
+#endif
+
+
+enum msm_spi_qup_version {
+	SPI_QUP_VERSION_NONE    = 0x0,
+	SPI_QUP_VERSION_BFAM    = 0x2,
+};
+
+enum msm_spi_pipe_direction {
+	SPI_BAM_CONSUMER_PIPE   = 0x0,
+	SPI_BAM_PRODUCER_PIPE   = 0x1,
+};
+
+#define SPI_BAM_MAX_DESC_NUM      32
+#define SPI_MAX_TRFR_BTWN_RESETS  ((64 * 1024) - 16)  /* 64KB - 16byte */
+
+enum msm_spi_clk_path_vec_idx {
+	MSM_SPI_CLK_PATH_SUSPEND_VEC = 0,
+	MSM_SPI_CLK_PATH_RESUME_VEC  = 1,
+};
+#define MSM_SPI_CLK_PATH_AVRG_BW(dd) (76800000)
+#define MSM_SPI_CLK_PATH_BRST_BW(dd) (76800000)
+
+static char const * const spi_rsrcs[] = {
+	"spi_clk",
+	"spi_miso",
+	"spi_mosi"
+};
+
+static char const * const spi_cs_rsrcs[] = {
+	"spi_cs",
+	"spi_cs1",
+	"spi_cs2",
+	"spi_cs3",
+};
+
+enum msm_spi_mode {
+	SPI_FIFO_MODE  = 0x0,  /* 00 */
+	SPI_BLOCK_MODE = 0x1,  /* 01 */
+	SPI_BAM_MODE   = 0x3,  /* 11 */
+	SPI_MODE_NONE  = 0xFF, /* invalid value */
+};
+
+/* Structure for SPI CS GPIOs */
+struct spi_cs_gpio {
+	int  gpio_num;
+	bool valid;
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct msm_spi_debugfs_data {
+	int offset;
+	struct msm_spi *dd;
+};
+/* Used to create debugfs entries */
+static struct msm_spi_regs{
+	const char *name;
+	mode_t mode;
+	int offset;
+} debugfs_spi_regs[] = {
+	{"config",               0644,  SPI_CONFIG },
+	{"io_control",           0644,  SPI_IO_CONTROL },
+	{"io_modes",             0644,  SPI_IO_MODES },
+	{"sw_reset",             0200,  SPI_SW_RESET },
+	{"time_out_current",     0444,  SPI_TIME_OUT_CURRENT },
+	{"mx_output_count",      0644,  SPI_MX_OUTPUT_COUNT },
+	{"mx_output_cnt_current", 0444, SPI_MX_OUTPUT_CNT_CURRENT },
+	{"mx_input_count",       0644,  SPI_MX_INPUT_COUNT },
+	{"mx_input_cnt_current", 0444,  SPI_MX_INPUT_CNT_CURRENT },
+	{"mx_read_count",        0644,  SPI_MX_READ_COUNT },
+	{"mx_read_cnt_current",  0444,  SPI_MX_READ_CNT_CURRENT },
+	{"operational",          0644,  SPI_OPERATIONAL },
+	{"error_flags",          0644,  SPI_ERROR_FLAGS },
+	{"error_flags_en",       0644,  SPI_ERROR_FLAGS_EN },
+	{"deassert_wait",        0644,  SPI_DEASSERT_WAIT },
+	{"output_debug",         0444,  SPI_OUTPUT_DEBUG },
+	{"input_debug",          0444,  SPI_INPUT_DEBUG },
+	{"test_ctrl",            0644,  SPI_TEST_CTRL },
+	{"output_fifo",          0200,  SPI_OUTPUT_FIFO },
+	{"input_fifo",           0400,  SPI_INPUT_FIFO },
+	{"spi_state",            0644,  SPI_STATE },
+#if IS_ENABLED(CONFIG_SPI_QSD) || IS_ENABLED(CONFIG_SPI_QSD_MODULE)
+	{"fifo_word_cnt",        0444,  SPI_FIFO_WORD_CNT},
+#else
+	{"qup_config",           0644,  QUP_CONFIG},
+	{"qup_error_flags",      0644,  QUP_ERROR_FLAGS},
+	{"qup_error_flags_en",   0644,  QUP_ERROR_FLAGS_EN},
+	{"mx_write_cnt",         0644,  QUP_MX_WRITE_COUNT},
+	{"mx_write_cnt_current", 0444,  QUP_MX_WRITE_CNT_CURRENT},
+	{"output_fifo_word_cnt", 0444,  SPI_OUTPUT_FIFO_WORD_CNT},
+	{"input_fifo_word_cnt",  0444,  SPI_INPUT_FIFO_WORD_CNT},
+#endif
+};
+#endif
+
+struct msm_spi_bam_pipe {
+	const char              *name;
+	struct sps_pipe         *handle;
+	struct sps_connect       config;
+	bool                     teardown_required;
+};
+
+struct msm_spi_bam {
+	void __iomem            *base;
+	phys_addr_t              phys_addr;
+	uintptr_t                handle;
+	int                      irq;
+	struct msm_spi_bam_pipe  prod;
+	struct msm_spi_bam_pipe  cons;
+	bool                     deregister_required;
+	u32			 curr_rx_bytes_recvd;
+	u32			 curr_tx_bytes_sent;
+	u32			 bam_rx_len;
+	u32			 bam_tx_len;
+};
+
+struct msm_spi {
+	u8                      *read_buf;
+	const u8                *write_buf;
+	void __iomem            *base;
+	struct device           *dev;
+	spinlock_t               queue_lock;
+	struct mutex             core_lock;
+	struct spi_device       *spi;
+	struct spi_transfer     *cur_transfer;
+	struct completion        tx_transfer_complete;
+	struct completion        rx_transfer_complete;
+	struct clk              *clk;    /* core clock */
+	struct clk              *pclk;   /* interface clock */
+	struct msm_bus_client_handle *bus_cl_hdl;
+	unsigned long            mem_phys_addr;
+	size_t                   mem_size;
+	int                      input_fifo_size;
+	int                      output_fifo_size;
+	u32                      rx_bytes_remaining;
+	u32                      tx_bytes_remaining;
+	u32                      clock_speed;
+	int                      irq_in;
+	int                      read_xfr_cnt;
+	int                      write_xfr_cnt;
+	int                      write_len;
+	int                      read_len;
+#if IS_ENABLED(CONFIG_SPI_QSD) || IS_ENABLED(CONFIG_SPI_QSD_MODULE)
+	int                      irq_out;
+	int                      irq_err;
+#endif
+	int                      bytes_per_word;
+	bool                     suspended;
+	bool                     transfer_pending;
+	wait_queue_head_t        continue_suspend;
+	/* DMA data */
+	enum msm_spi_mode        tx_mode;
+	enum msm_spi_mode        rx_mode;
+	bool                     use_dma;
+	int                      tx_dma_chan;
+	int                      tx_dma_crci;
+	int                      rx_dma_chan;
+	int                      rx_dma_crci;
+	int                      (*dma_init)(struct msm_spi *dd);
+	void                     (*dma_teardown)(struct msm_spi *dd);
+	struct msm_spi_bam       bam;
+	int                      input_block_size;
+	int                      output_block_size;
+	int                      input_burst_size;
+	int                      output_burst_size;
+	atomic_t                 rx_irq_called;
+	atomic_t                 tx_irq_called;
+	/* Used to pad messages unaligned to block size */
+	u8                       *tx_padding;
+	dma_addr_t               tx_padding_dma;
+	u8                       *rx_padding;
+	dma_addr_t               rx_padding_dma;
+	u32                      tx_unaligned_len;
+	u32                      rx_unaligned_len;
+	/* DMA statistics */
+	int                      stat_rx;
+	int                      stat_tx;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dent_spi;
+	struct dentry *debugfs_spi_regs[ARRAY_SIZE(debugfs_spi_regs)];
+	struct msm_spi_debugfs_data reg_data[ARRAY_SIZE(debugfs_spi_regs)];
+#endif
+	struct msm_spi_platform_data *pdata; /* Platform data */
+	/* When set indicates multiple transfers in a single message */
+	bool                     rx_done;
+	bool                     tx_done;
+	u32                      cur_msg_len;
+	/* Used in FIFO mode to keep track of the transfer being processed */
+	struct spi_transfer     *cur_tx_transfer;
+	struct spi_transfer     *cur_rx_transfer;
+	/* Temporary buffer used for WR-WR or WR-RD transfers */
+	u8                      *temp_buf;
+	/* GPIO pin numbers for SPI clk, miso and mosi */
+	int                      spi_gpios[ARRAY_SIZE(spi_rsrcs)];
+	/* SPI CS GPIOs for each slave */
+	struct spi_cs_gpio       cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
+	enum msm_spi_qup_version qup_ver;
+	int			 max_trfr_len;
+	u16			 xfrs_delay_usec;
+	struct pinctrl		*pinctrl;
+	struct pinctrl_state	*pins_active;
+	struct pinctrl_state	*pins_sleep;
+	bool			is_init_complete;
+	bool			pack_words;
+};
+
+/* Forward declaration */
+static irqreturn_t msm_spi_input_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_output_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_error_irq(int irq, void *dev_id);
+static inline int msm_spi_set_state(struct msm_spi *dd,
+				    enum msm_spi_state state);
+static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
+static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
+static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
+
+#if IS_ENABLED(CONFIG_SPI_QSD) || IS_ENABLED(CONFIG_SPI_QSD_MODULE)
+static inline void msm_spi_disable_irqs(struct msm_spi *dd)
+{
+	disable_irq(dd->irq_in);
+	disable_irq(dd->irq_out);
+	disable_irq(dd->irq_err);
+}
+
+static inline void msm_spi_enable_irqs(struct msm_spi *dd)
+{
+	enable_irq(dd->irq_in);
+	enable_irq(dd->irq_out);
+	enable_irq(dd->irq_err);
+}
+
+/*
+ * msm_spi_request_irq - QSD variant: the legacy core exposes three
+ * separate interrupt lines (input, output, error); request all of them.
+ * Note the success path deliberately falls through the error_irq label
+ * with rc == 0, so the label serves both success and failure returns.
+ * IRQs are devm-managed; no explicit free on the error path.
+ */
+static inline int msm_spi_request_irq(struct msm_spi *dd,
+				struct platform_device *pdev,
+				struct spi_master *master)
+{
+	int rc;
+
+	dd->irq_in  = platform_get_irq(pdev, 0);
+	dd->irq_out = platform_get_irq(pdev, 1);
+	dd->irq_err = platform_get_irq(pdev, 2);
+	if ((dd->irq_in < 0) || (dd->irq_out < 0) || (dd->irq_err < 0))
+		return -EINVAL;
+
+	rc = devm_request_irq(dd->dev, dd->irq_in, msm_spi_input_irq,
+		IRQF_TRIGGER_RISING, pdev->name, dd);
+	if (rc)
+		goto error_irq;
+
+	rc = devm_request_irq(dd->dev, dd->irq_out, msm_spi_output_irq,
+		IRQF_TRIGGER_RISING, pdev->name, dd);
+	if (rc)
+		goto error_irq;
+
+	/* NOTE(review): the error IRQ gets master as dev_id while the other
+	 * two get dd — confirm msm_spi_error_irq() expects the master.
+	 */
+	rc = devm_request_irq(dd->dev, dd->irq_err, msm_spi_error_irq,
+		IRQF_TRIGGER_RISING, pdev->name, master);
+	if (rc)
+		goto error_irq;
+
+error_irq:
+	return rc;
+}
+
+static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err) {}
+static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
+static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
+
+static inline int  msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
+static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
+{
+	msm_spi_write_word_to_fifo(dd);
+}
+static inline void msm_spi_set_write_count(struct msm_spi *dd, int val) {}
+
+/*
+ * msm_spi_complete - QSD variant: signal transfer completion.
+ *
+ * Fix: struct msm_spi declares tx_transfer_complete and
+ * rx_transfer_complete (there is no "transfer_complete" member), so the
+ * original complete(&dd->transfer_complete) cannot compile when
+ * CONFIG_SPI_QSD is enabled.  Signal both completions, mirroring the
+ * QUP variant which sets both tx_done and rx_done.
+ */
+static inline void msm_spi_complete(struct msm_spi *dd)
+{
+	complete(&dd->tx_transfer_complete);
+	complete(&dd->rx_transfer_complete);
+}
+
+static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007B, dd->base + SPI_ERROR_FLAGS_EN);
+}
+
+static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007F, dd->base + SPI_ERROR_FLAGS);
+}
+
+#else
+/* In QUP the same interrupt line is used for input, output and error*/
+static inline int msm_spi_request_irq(struct msm_spi *dd,
+				struct platform_device *pdev,
+				struct spi_master *master)
+{
+	dd->irq_in  = platform_get_irq(pdev, 0);
+	if (dd->irq_in < 0)
+		return -EINVAL;
+
+	return devm_request_irq(dd->dev, dd->irq_in, msm_spi_qup_irq,
+		IRQF_TRIGGER_HIGH, pdev->name, dd);
+}
+
+static inline void msm_spi_disable_irqs(struct msm_spi *dd)
+{
+	disable_irq(dd->irq_in);
+}
+
+static inline void msm_spi_enable_irqs(struct msm_spi *dd)
+{
+	enable_irq(dd->irq_in);
+}
+
+static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err)
+{
+	*spi_err = readl_relaxed(dd->base + QUP_ERROR_FLAGS);
+}
+
+static inline void msm_spi_ack_clk_err(struct msm_spi *dd)
+{
+	writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
+}
+
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n);
+
+/**
+ * msm_spi_set_qup_config: set QUP_CONFIG to no_input, no_output, and N bits
+ */
+static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
+{
+	u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
+
+	msm_spi_set_bpw_and_no_io_flags(dd, &qup_config, bpw-1);
+	writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE, dd->base + QUP_CONFIG);
+}
+
+static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
+{
+	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
+		return -EINVAL;
+	if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
+		return -EINVAL;
+	return 0;
+}
+
+static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
+{
+	msm_spi_write_rmn_to_fifo(dd);
+}
+
+static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
+{
+	writel_relaxed(val, dd->base + QUP_MX_WRITE_COUNT);
+}
+
+static inline void msm_spi_complete(struct msm_spi *dd)
+{
+	dd->tx_done = true;
+	dd->rx_done = true;
+}
+
+static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
+{
+	if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+		writel_relaxed(
+			SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+			dd->base + SPI_ERROR_FLAGS_EN);
+	else
+		writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
+}
+
+static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
+{
+	if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+		writel_relaxed(
+			SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+			dd->base + SPI_ERROR_FLAGS);
+	else
+		writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
+}
+
+#endif
+#endif
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 5216bc7..8e73f4e 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -640,11 +640,22 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
 	u32 linestate = 0, intr_mask = 0;
 
 	if (qphy->suspended == suspend) {
+		/*
+		 * PHY_SUS_OVERRIDE is set when there is a cable
+		 * disconnect and the previous suspend call was because
+		 * of EUD spoof disconnect. Override this check and
+		 * ensure that the PHY is properly put in low power
+		 * mode.
+		 */
+		if (qphy->phy.flags & PHY_SUS_OVERRIDE)
+			goto suspend;
+
 		dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
 			__func__);
 		return 0;
 	}
 
+suspend:
 	if (suspend) {
 		/* Bus suspend case */
 		if (qphy->cable_connected) {
@@ -697,8 +708,7 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
 			writel_relaxed(0x00,
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 
-			if (!qphy->eud_enable_reg ||
-					!readl_relaxed(qphy->eud_enable_reg)) {
+			if (!(qphy->phy.flags & EUD_SPOOF_DISCONNECT)) {
 				/* Disable PHY */
 				writel_relaxed(POWER_DOWN |
 					readl_relaxed(qphy->base +
@@ -710,10 +720,11 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
 				if (qphy->tcsr_clamp_dig_n)
 					writel_relaxed(0x0,
 						qphy->tcsr_clamp_dig_n);
+
+				qusb_phy_enable_clocks(qphy, false);
+				qusb_phy_enable_power(qphy, false);
 			}
 
-			qusb_phy_enable_clocks(qphy, false);
-			qusb_phy_enable_power(qphy, false);
 			mutex_unlock(&qphy->phy_lock);
 
 			/*
@@ -1695,6 +1706,14 @@ static int qusb_phy_probe(struct platform_device *pdev)
 
 	qphy->suspended = true;
 
+	/*
+	 * EUD may be enabled in the boot loader; to keep the EUD session
+	 * alive across kernel boot until the USB PHY driver is initialized
+	 * based on cable status, keep the LDOs on here.
+	 */
+	if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg))
+		qusb_phy_enable_power(qphy, true);
+
 	if (of_property_read_bool(dev->of_node, "extcon")) {
 		qphy->id_state = true;
 		qphy->vbus_active = false;
diff --git a/include/dt-bindings/msm/power-on.h b/include/dt-bindings/msm/power-on.h
new file mode 100644
index 0000000..c43b89b
--- /dev/null
+++ b/include/dt-bindings/msm/power-on.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015, 2017, 2019-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
diff --git a/include/linux/hwkm.h b/include/linux/hwkm.h
new file mode 100644
index 0000000..4af06b9
--- /dev/null
+++ b/include/linux/hwkm.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __HWKM_H_
+#define __HWKM_H_
+
+/* Kernel header: pull in bool/size_t and u8/u64 (used below) from the
+ * kernel's own type headers rather than the compiler's freestanding ones. */
+#include <linux/types.h>
+
+/* Maximum number of bytes in a key used in a KEY_SLOT_RDWR operation */
+#define HWKM_MAX_KEY_SIZE 32
+/* Maximum number of bytes in a SW ctx used in a SYSTEM_KDF operation */
+#define HWKM_MAX_CTX_SIZE 64
+/* Maximum number of bytes in a WKB used in a key wrap or unwrap operation */
+#define HWKM_MAX_BLOB_SIZE 68
+
+
+/* Opcodes to be set in the op field of a command */
+enum hwkm_op {
+	/* Opcode to generate a random key */
+	NIST_KEYGEN = 0,
+	/* Opcode to derive a key */
+	SYSTEM_KDF,
+	/* Used only by HW */
+	QFPROM_KEY_RDWR,
+	/* Opcode to wrap a key and export the wrapped key */
+	KEY_WRAP_EXPORT,
+	/*
+	 * Opcode to import a wrapped key and unwrap it in the
+	 * specified key slot
+	 */
+	KEY_UNWRAP_IMPORT,
+	/* Opcode to clear a slot */
+	KEY_SLOT_CLEAR,
+	/* Opcode to read or write a key from/to a slot */
+	KEY_SLOT_RDWR,
+	/*
+	 * Opcode to broadcast a TPKEY to all slaves configured
+	 * to receive a TPKEY.
+	 */
+	SET_TPKEY,
+
+
+	HWKM_MAX_OP,
+	HWKM_UNDEF_OP = 0xFF
+};
+
+/*
+ * Algorithm values which can be used in the alg_allowed field of the
+ * key policy.
+ */
+enum hwkm_alg {
+	AES128_ECB = 0,
+	AES256_ECB = 1,
+	DES_ECB = 2,
+	TDES_ECB = 3,
+	AES128_CBC = 4,
+	AES256_CBC = 5,
+	DES_CBC = 6,
+	TDES_CBC = 7,
+	AES128_CCM_TC = 8,
+	AES128_CCM_NTC = 9,
+	AES256_CCM_TC = 10,
+	AES256_CCM_NTC = 11,
+	AES256_SIV = 12,
+	AES128_CTR = 13,
+	AES256_CTR = 14,
+	AES128_XTS = 15,
+	AES256_XTS = 16,
+	SHA1_HMAC = 17,
+	SHA256_HMAC = 18,
+	AES128_CMAC = 19,
+	AES256_CMAC = 20,
+	SHA384_HMAC = 21,
+	SHA512_HMAC = 22,
+	AES128_GCM = 23,
+	AES256_GCM = 24,
+	KASUMI = 25,
+	SNOW3G = 26,
+	ZUC = 27,
+	PRINCE = 28,
+	SIPHASH = 29,
+	QARMA64 = 30,
+	QARMA128 = 31,
+
+	HWKM_ALG_MAX,
+
+	HWKM_UNDEF_ALG = 0xFF
+};
+
+/* Key type values which can be used in the key_type field of the key policy */
+enum hwkm_type {
+	KEY_DERIVATION_KEY = 0,
+	KEY_WRAPPING_KEY = 1,
+	KEY_SWAPPING_KEY = 2,
+	TRANSPORT_KEY = 3,
+	GENERIC_KEY = 4,
+
+	HWKM_TYPE_MAX,
+
+	HWKM_UNDEF_KEY_TYPE = 0xFF
+};
+
+/* Destinations which a context can use */
+enum hwkm_destination {
+	KM_MASTER = 0,
+	GPCE_SLAVE = 1,
+	MCE_SLAVE = 2,
+	PIMEM_SLAVE = 3,
+	ICE0_SLAVE = 4,
+	ICE1_SLAVE = 5,
+	ICE2_SLAVE = 6,
+	ICE3_SLAVE = 7,
+	DP0_HDCP_SLAVE = 8,
+	DP1_HDCP_SLAVE = 9,
+	ICEMEM_SLAVE = 10,
+
+	HWKM_DESTINATION_MAX,
+
+	HWKM_UNDEF_DESTINATION = 0xFF
+};
+
+/*
+ * Key security levels which can be set in the security_lvl field of
+ * key policy.
+ */
+enum hwkm_security_level {
+	/* Can be read by SW in plaintext using KEY_SLOT_RDWR cmd. */
+	SW_KEY = 0,
+	/* Usable by SW, but not readable in plaintext. */
+	MANAGED_KEY = 1,
+	/* Not usable by SW. */
+	HW_KEY = 2,
+
+	HWKM_SECURITY_LEVEL_MAX,
+
+	HWKM_UNDEF_SECURITY_LEVEL = 0xFF
+};
+
+struct hwkm_key_policy {
+	bool km_by_spu_allowed;
+	bool km_by_modem_allowed;
+	bool km_by_nsec_allowed;
+	bool km_by_tz_allowed;
+
+	enum hwkm_alg alg_allowed;
+
+	bool enc_allowed;
+	bool dec_allowed;
+
+	enum hwkm_type key_type;
+	u8 kdf_depth;
+
+	bool wrap_export_allowed;
+	bool swap_export_allowed;
+
+	enum hwkm_security_level security_lvl;
+
+	enum hwkm_destination hw_destination;
+
+	bool wrap_with_tpk_allowed;
+};
+
+struct hwkm_bsve {
+	bool enabled;
+	bool km_key_policy_ver_en;
+	bool km_apps_secure_en;
+	bool km_msa_secure_en;
+	bool km_lcm_fuse_en;
+	bool km_boot_stage_otp_en;
+	bool km_swc_en;
+	bool km_child_key_policy_en;
+	bool km_mks_en;
+	u64 km_fuse_region_sha_digest_en;
+};
+
+struct hwkm_keygen_cmd {
+	u8 dks;					/* Destination Key Slot */
+	struct hwkm_key_policy policy;		/* Key policy */
+};
+
+struct hwkm_rdwr_cmd {
+	uint8_t slot;			/* Key Slot */
+	bool is_write;			/* Write or read op */
+	struct hwkm_key_policy policy;	/* Key policy for write */
+	uint8_t key[HWKM_MAX_KEY_SIZE];	/* Key for write */
+	size_t sz;			/* Length of key in bytes */
+};
+
+struct hwkm_kdf_cmd {
+	uint8_t dks;			/* Destination Key Slot */
+	uint8_t kdk;			/* Key Derivation Key Slot */
+	uint8_t mks;			/* Mixing key slot (bsve controlled) */
+	struct hwkm_key_policy policy;	/* Key policy. */
+	struct hwkm_bsve bsve;		/* Binding state vector */
+	uint8_t ctx[HWKM_MAX_CTX_SIZE];	/* Context */
+	size_t sz;			/* Length of context in bytes */
+};
+
+struct hwkm_set_tpkey_cmd {
+	uint8_t sks;			/* The slot to use as the TPKEY */
+};
+
+struct hwkm_unwrap_cmd {
+	uint8_t dks;			/* Destination Key Slot */
+	uint8_t kwk;			/* Key Wrapping Key Slot */
+	uint8_t wkb[HWKM_MAX_BLOB_SIZE];/* Wrapped Key Blob */
+	uint8_t sz;			/* Length of WKB in bytes */
+};
+
+struct hwkm_wrap_cmd {
+	uint8_t sks;			/* Destination Key Slot */
+	uint8_t kwk;			/* Key Wrapping Key Slot */
+	struct hwkm_bsve bsve;		/* Binding state vector */
+};
+
+struct hwkm_clear_cmd {
+	uint8_t dks;			/* Destination key slot */
+	bool is_double_key;		/* Whether this is a double key */
+};
+
+
+struct hwkm_cmd {
+	enum hwkm_op op;		/* Operation */
+	union /* Structs with opcode specific parameters */
+	{
+		struct hwkm_keygen_cmd keygen;
+		struct hwkm_rdwr_cmd rdwr;
+		struct hwkm_kdf_cmd kdf;
+		struct hwkm_set_tpkey_cmd set_tpkey;
+		struct hwkm_unwrap_cmd unwrap;
+		struct hwkm_wrap_cmd wrap;
+		struct hwkm_clear_cmd clear;
+	};
+};
+
+struct hwkm_rdwr_rsp {
+	struct hwkm_key_policy policy;	/* Key policy for read */
+	uint8_t key[HWKM_MAX_KEY_SIZE];	/* Only available for read op */
+	size_t sz;			/* Length of the key (bytes) */
+};
+
+struct hwkm_wrap_rsp {
+	uint8_t wkb[HWKM_MAX_BLOB_SIZE];	/* Wrapping key blob */
+	size_t sz;				/* key blob len (bytes) */
+};
+
+struct hwkm_rsp {
+	u32 status;
+	union /* Structs with opcode specific outputs */
+	{
+		struct hwkm_rdwr_rsp rdwr;
+		struct hwkm_wrap_rsp wrap;
+	};
+};
+
+enum hwkm_master_key_slots {
+	/** L1 KDKs. Not usable by SW. Used by HW to derive L2 KDKs */
+	NKDK_L1 = 0,
+	PKDK_L1 = 1,
+	SKDK_L1 = 2,
+	UKDK_L1 = 3,
+
+	/*
+	 * L2 KDKs, used to derive keys by SW.
+	 * Cannot be used for crypto, only key derivation
+	 */
+	TZ_NKDK_L2 = 4,
+	TZ_PKDK_L2 = 5,
+	TZ_SKDK_L2 = 6,
+	MODEM_PKDK_L2 = 7,
+	MODEM_SKDK_L2 = 8,
+	TZ_UKDK_L2 = 9,
+
+	/** Slots reserved for TPKEY */
+	TPKEY_EVEN_SLOT = 10,
+	TPKEY_KEY_ODD_SLOT = 11,
+
+	/** First key slot available for general purpose use cases */
+	MASTER_GENERIC_SLOTS_START,
+
+	UNDEF_SLOT = 0xFF
+};
+
+#if IS_ENABLED(CONFIG_QTI_HW_KEY_MANAGER)
+int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd, struct hwkm_rsp *rsp);
+int qti_hwkm_clocks(bool on);
+int qti_hwkm_init(void);
+#else
+/* Stub must match the real API name so callers link when the
+ * config is disabled (was mistakenly named qti_hwkm_add_req). */
+static inline int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd,
+				      struct hwkm_rsp *rsp)
+{
+	return -EOPNOTSUPP;
+}
+static inline int qti_hwkm_clocks(bool on)
+{
+	return -EOPNOTSUPP;
+}
+static inline int qti_hwkm_init(void)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_QTI_HW_KEY_MANAGER */
+#endif /* __HWKM_H_ */
diff --git a/include/linux/spi/qcom-spi.h b/include/linux/spi/qcom-spi.h
new file mode 100644
index 0000000..1888fe5
--- /dev/null
+++ b/include/linux/spi/qcom-spi.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SPI driver for Qualcomm Technologies, Inc. MSM platforms.
+ */
+
+/**
+ * msm_spi_platform_data: msm spi-controller's configuration data
+ *
+ * @max_clock_speed max spi clock speed
+ * @active_only when set, votes when system active and removes the vote when
+ *       system goes idle (optimises for performance). When unset, voting using
+ *       runtime pm (optimizes for power).
+ * @master_id master id number of the controller's wrapper (BLSP or GSBI).
+ *       When zero, clock path voting is disabled.
+ * @gpio_config pointer to function for configuring gpio
+ * @gpio_release pointer to function for releasing gpio pins
+ * @dma_config function pointer for configuring dma engine
+ * @pm_lat power management latency
+ * @infinite_mode use FIFO mode in infinite mode
+ * @ver_reg_exists if the version register exists
+ * @use_bam true if BAM is available
+ * @bam_consumer_pipe_index BAM consumer pipe
+ * @bam_producer_pipe_index BAM producer pipe
+ * @rt_priority true if RT thread
+ * @use_pinctrl true if pinctrl library is used
+ * @is_shared true when qup is shared between ee's
+ */
+struct msm_spi_platform_data {
+	u32 max_clock_speed;
+	u32  master_id;
+	u32 bus_width;
+	int (*gpio_config)(void);
+	void (*gpio_release)(void);
+	int (*dma_config)(void);
+	const char *rsl_id;
+	u32  pm_lat;
+	u32  infinite_mode;
+	bool ver_reg_exists;
+	bool use_bam;
+	u32  bam_consumer_pipe_index;
+	u32  bam_producer_pipe_index;
+	bool rt_priority;
+	bool use_pinctrl;
+	bool is_shared;
+};
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
new file mode 100644
index 0000000..9978bba
--- /dev/null
+++ b/include/soc/qcom/spm.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010-2017, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_SPM_H
+#define __ARCH_ARM_MACH_MSM_SPM_H
+
+enum {
+	MSM_SPM_MODE_DISABLED,
+	MSM_SPM_MODE_CLOCK_GATING,
+	MSM_SPM_MODE_RETENTION,
+	MSM_SPM_MODE_GDHS,
+	MSM_SPM_MODE_POWER_COLLAPSE,
+	MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE,
+	MSM_SPM_MODE_FASTPC,
+	MSM_SPM_MODE_NR
+};
+
+enum msm_spm_avs_irq {
+	MSM_SPM_AVS_IRQ_MIN,
+	MSM_SPM_AVS_IRQ_MAX,
+};
+
+struct msm_spm_device;
+struct device_node;
+
+#if defined(CONFIG_MSM_SPM)
+
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
+void msm_spm_set_rpm_hs(bool allow_rpm_hs);
+int msm_spm_probe_done(void);
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
+int msm_spm_get_vdd(unsigned int cpu);
+int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node,
+		unsigned int val, int cpu, int vctl_offset);
+struct msm_spm_device *msm_spm_get_device_by_name(const char *name);
+int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm);
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm);
+int msm_spm_device_init(void);
+bool msm_spm_is_mode_avail(unsigned int mode);
+void msm_spm_dump_regs(unsigned int cpu);
+int msm_spm_is_avs_enabled(unsigned int cpu);
+int msm_spm_avs_enable(unsigned int cpu);
+int msm_spm_avs_disable(unsigned int cpu);
+int msm_spm_avs_set_limit(unsigned int cpu, uint32_t min_lvl,
+		uint32_t max_lvl);
+int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+
+#if defined(CONFIG_MSM_L2_SPM)
+
+/* Public functions */
+
+int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt);
+int msm_spm_enable_fts_lpm(int cpu, uint32_t mode);
+
+#else
+
+static inline int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_enable_fts_lpm(int cpu, uint32_t mode)
+{
+	return -ENODEV;
+}
+#endif /* defined(CONFIG_MSM_L2_SPM) */
+#else /* defined(CONFIG_MSM_SPM) */
+static inline int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+
+static inline void msm_spm_set_rpm_hs(bool allow_rpm_hs) {}
+
+static inline int msm_spm_probe_done(void)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_get_vdd(unsigned int cpu)
+{
+	return 0;
+}
+
+static inline int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node,
+		unsigned int val, int cpu, int vctl_offset)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_device_init(void)
+{
+	return -ENODEV;
+}
+
+static inline void msm_spm_dump_regs(unsigned int cpu)
+{ }
+
+static inline int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_config_low_power_mode_addr(
+	struct msm_spm_device *dev, unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+
+static inline struct msm_spm_device *msm_spm_get_device_by_name(
+				const char *name)
+{
+	return NULL;
+}
+
+static inline bool msm_spm_is_mode_avail(unsigned int mode)
+{
+	return false;
+}
+
+static inline int msm_spm_is_avs_enabled(unsigned int cpu)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_enable(unsigned int cpu)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_disable(unsigned int cpu)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_set_limit(unsigned int cpu, uint32_t min_lvl,
+		uint32_t max_lvl)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_enable_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_disable_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_clear_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENODEV;
+}
+
+#endif  /* defined (CONFIG_MSM_SPM) */
+#endif  /* __ARCH_ARM_MACH_MSM_SPM_H */
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 125b5a9..ac1e188 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -35,7 +35,7 @@
 
 
 #define TASKSTATS_VERSION	9
-#define TASKSTATS2_VERSION	1
+#define TASKSTATS2_VERSION	2
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -181,6 +181,20 @@ struct taskstats2 {
 	__u64 shmem_rss;	/* KB */
 	__u64 unreclaimable;	/* KB */
 	/* version 1 ends here */
+
+	/* version 2 begins here */
+	__u64	utime;		/* User CPU time [usec] */
+	__u64	stime;		/* System CPU time [usec] */
+	__u64	cutime;		/* Cumulative User CPU time [usec] */
+	__u64	cstime;		/* Cumulative System CPU time [usec] */
+
+	__u32	uid __attribute__((aligned(8)));
+					/* User ID */
+	__u32	ppid;  /* Parent process ID */
+	char	name[TS_COMM_LEN];  /* Command name */
+	char    state[TS_COMM_LEN]; /* Process state */
+	/* version 2 ends here */
+
 };
 
 /*
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index f605007..7369a22 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1003,6 +1003,9 @@ enum v4l2_mpeg_vidc_video_roi_type {
 	V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BYTE = 2,
 };
 
+#define V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_HINT \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 133)
+
 /*  Camera class control IDs */
 
 #define V4L2_CID_CAMERA_CLASS_BASE	(V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index d5b5f75..4b0c22d 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -650,6 +650,11 @@ static int taskstats2_cmd_attr_pid(struct genl_info *info)
 	size_t size;
 	u32 pid;
 	int rc;
+	u64 utime, stime;
+	const struct cred *tcred;
+	struct cgroup_subsys_state *css;
+	unsigned long flags;
+	struct signal_struct *sig;
 
 	size = nla_total_size_64bit(sizeof(struct taskstats2));
 
@@ -691,6 +696,35 @@ static int taskstats2_cmd_attr_pid(struct genl_info *info)
 #undef K
 		task_unlock(p);
 	}
+
+	/* version 2 fields begin here */
+	task_cputime(tsk, &utime, &stime);
+	stats->utime = div_u64(utime, NSEC_PER_USEC);
+	stats->stime = div_u64(stime, NSEC_PER_USEC);
+
+	if (lock_task_sighand(tsk, &flags)) {
+		sig = tsk->signal;
+		stats->cutime = sig->cutime;
+		stats->cstime = sig->cstime;
+		unlock_task_sighand(tsk, &flags);
+	}
+
+	rcu_read_lock();
+	tcred = __task_cred(tsk);
+	stats->uid = from_kuid_munged(current_user_ns(), tcred->uid);
+	stats->ppid = pid_alive(tsk) ?
+		task_tgid_nr_ns(rcu_dereference(tsk->real_parent),
+			task_active_pid_ns(current)) : 0;
+	rcu_read_unlock();
+
+	strlcpy(stats->name, tsk->comm, sizeof(stats->name));
+
+	css = task_get_css(tsk, cpuset_cgrp_id);
+	cgroup_path_ns(css->cgroup, stats->state, sizeof(stats->state),
+				current->nsproxy->cgroup_ns);
+	css_put(css);
+	/* version 2 fields end here */
+
 	put_task_struct(tsk);
 
 	return send_reply(rep_skb, info);
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 5d4758c..5b38bcae 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -31,6 +31,7 @@
 #define BUS_INTERVAL_FULL_SPEED 1000 /* in us */
 #define BUS_INTERVAL_HIGHSPEED_AND_ABOVE 125 /* in us */
 #define MAX_BINTERVAL_ISOC_EP 16
+#define DEV_RELEASE_WAIT_TIMEOUT 10000 /* in ms */
 
 #define SND_PCM_CARD_NUM_MASK 0xffff0000
 #define SND_PCM_DEV_NUM_MASK 0xff00
@@ -890,12 +891,14 @@ static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
 		if (ret < 0)
 			uaudio_err("qmi send failed with err: %d\n", ret);
 
-		ret = wait_event_interruptible(dev->disconnect_wq,
-				!atomic_read(&dev->in_use));
-		if (ret < 0) {
-			uaudio_dbg("failed with ret %d\n", ret);
-			return;
-		}
+		ret = wait_event_interruptible_timeout(dev->disconnect_wq,
+				!atomic_read(&dev->in_use),
+				msecs_to_jiffies(DEV_RELEASE_WAIT_TIMEOUT));
+		if (!ret)
+			uaudio_err("timeout while waiting for dev_release\n");
+		else if (ret < 0)
+			uaudio_err("failed with ret %d\n", ret);
+
 		mutex_lock(&chip->dev_lock);
 	}
 
@@ -1154,13 +1157,17 @@ static void handle_uaudio_stream_req(struct qmi_handle *handle,
 
 response:
 	if (!req_msg->enable && ret != -EINVAL) {
-		if (info_idx >= 0) {
-			mutex_lock(&chip->dev_lock);
-			info = &uadev[pcm_card_num].info[info_idx];
-			uaudio_dev_intf_cleanup(uadev[pcm_card_num].udev, info);
-			uaudio_dbg("release resources: intf# %d card# %d\n",
-					subs->interface, pcm_card_num);
-			mutex_unlock(&chip->dev_lock);
+		if (ret != -ENODEV) {
+			if (info_idx >= 0) {
+				mutex_lock(&chip->dev_lock);
+				info = &uadev[pcm_card_num].info[info_idx];
+				uaudio_dev_intf_cleanup(
+						uadev[pcm_card_num].udev,
+						info);
+				uaudio_dbg("release resources: intf# %d card# %d\n",
+						subs->interface, pcm_card_num);
+				mutex_unlock(&chip->dev_lock);
+			}
 		}
 		if (atomic_read(&uadev[pcm_card_num].in_use))
 			kref_put(&uadev[pcm_card_num].kref,