Merge "defconfig: msm: Enable harden branch predictor support for Bengal"
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fab641a..8923a10 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3959,6 +3959,13 @@
 			Run specified binary instead of /init from the ramdisk,
 			used for early userspace startup. See initrd.
 
+	rdrand=		[X86]
+			force - Override the decision by the kernel to hide the
+				advertisement of RDRAND support (this affects
+				certain AMD processors because of buggy BIOS
+				support, specifically around the suspend/resume
+				path).
+
 	rdt=		[HW,X86,RDT]
 			Turn on/off individual RDT features. List is:
 			cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
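
The rdrand= parameter is parsed by an early_param() handler added to arch/x86/kernel/cpu/amd.c later in this series. Usage is a single token on the kernel command line; a boot-loader entry might look like this (kernel image path and root device are placeholders):

    linux /boot/vmlinuz-4.19.69 root=/dev/sda1 ro rdrand=force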
diff --git a/Makefile b/Makefile
index 47b7619..0cda102 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 66
+SUBLEVEL = 69
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
index 36efe41..9e33c41 100644
--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -125,6 +125,9 @@
 	};
 
 	mdio-bus-mux {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
 		/* BIT(9) = 1 => external mdio */
 		mdio_ext: mdio@200 {
 			reg = <0x200>;
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index fd6cde2..871fa50 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -658,13 +658,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
-			      const struct coproc_reg *table, size_t num)
+			      const struct coproc_reg *table, size_t num,
+			      unsigned long *bmap)
 {
 	unsigned long i;
 
 	for (i = 0; i < num; i++)
-		if (table[i].reset)
+		if (table[i].reset) {
+			int reg = table[i].reg;
+
 			table[i].reset(vcpu, &table[i]);
+			if (reg > 0 && reg < NR_CP15_REGS) {
+				set_bit(reg, bmap);
+				if (table[i].is_64bit)
+					set_bit(reg + 1, bmap);
+			}
+		}
 }
 
 static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
@@ -1439,17 +1448,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 {
 	size_t num;
 	const struct coproc_reg *table;
-
-	/* Catch someone adding a register without putting in reset entry. */
-	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
+	DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
 
 	/* Generic chip reset first (so target could override). */
-	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
+	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
 
 	table = get_target_table(vcpu->arch.target, &num);
-	reset_coproc_regs(vcpu, table, num);
+	reset_coproc_regs(vcpu, table, num, bmap);
 
 	for (num = 1; num < NR_CP15_REGS; num++)
-		WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+		WARN(!test_bit(num, bmap),
 		     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
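
The hunks above replace the old poison-value check (memset the cp15 array to 0x42, then compare) with explicit bookkeeping: each reset hook records the register index it touched in a bitmap, and the final loop warns about any index never set. The same pattern is applied to arch/arm64/kvm/sys_regs.c further down. A minimal standalone sketch of the pattern (hypothetical demo_* names, usual kernel headers assumed):

    #include <linux/bitmap.h>
    #include <linux/bug.h>
    #include <linux/kernel.h>

    #define NR_DEMO_REGS 16

    static void demo_reset_regs(const int *regs, size_t num,
                                unsigned long *bmap)
    {
        size_t i;

        /* Record each register index that a reset hook handled. */
        for (i = 0; i < num; i++)
            if (regs[i] > 0 && regs[i] < NR_DEMO_REGS)
                set_bit(regs[i], bmap);
    }

    static void demo_check_reset(void)
    {
        DECLARE_BITMAP(bmap, NR_DEMO_REGS) = { 0, };
        int demo_regs[] = { 1, 2, 5 };
        size_t i;

        demo_reset_regs(demo_regs, ARRAY_SIZE(demo_regs), bmap);

        /* Any index never recorded was missed by every reset table. */
        for (i = 1; i < NR_DEMO_REGS; i++)
            WARN(!test_bit(i, bmap), "register %zu was never reset", i);
    }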
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index cd350de..efcd400 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -37,6 +37,7 @@
 #define DEEPSLEEP_SLEEPENABLE_BIT	BIT(31)
 
 	.text
+	.arch	armv5te
 /*
  * Move DaVinci into deep sleep state
  *
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
new file mode 100644
index 0000000..d06d4f6
--- /dev/null
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -0,0 +1,528 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_BENGAL=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_ENERGY_MODEL=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
+CONFIG_HW_RANDOM=y
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_BENGAL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIM2M=y
+CONFIG_VIDEO_VICODEC=y
+CONFIG_DRM=y
+# CONFIG_DRM_MSM is not set
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_IPC_LOGGING=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_SM_GPUCC_BENGAL=y
+CONFIG_SM_DISPCC_BENGAL=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_MDT_LOADER=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_PWM=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_SLIMBUS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SCHEDSTATS=y
+CONFIG_IPC_LOGGING=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index f69e298..aa328e3 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -242,6 +242,7 @@
 CONFIG_DNS_RESOLVER=y
 CONFIG_QRTR=y
 CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
@@ -265,6 +266,7 @@
 CONFIG_SCSI_UFS_QCOM=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -273,6 +275,7 @@
 CONFIG_DUMMY=y
 CONFIG_TUN=y
 CONFIG_RMNET=y
+CONFIG_PHYLIB=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
@@ -301,6 +304,7 @@
 # CONFIG_DEVMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
@@ -412,10 +416,12 @@
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_QCOM_GENI_SE=y
-CONFIG_SM_GCC_BENGAL=y
+CONFIG_SM_GPUCC_BENGAL=y
+CONFIG_SM_DISPCC_BENGAL=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
@@ -424,10 +430,12 @@
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_MDT_LOADER=y
 CONFIG_QPNP_PBS=y
 CONFIG_QCOM_QMI_HELPERS=y
 CONFIG_QCOM_SMEM=y
@@ -456,6 +464,7 @@
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_MSM_CDSP_LOADER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
@@ -475,11 +484,13 @@
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
 CONFIG_SLIMBUS=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -487,6 +498,8 @@
 CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_EFIVAR_FS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index c2279be..ad2767a 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -681,10 +681,10 @@
 CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index ef7e81b5..5e3ed0d 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -768,4 +768,3 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 5e47b38..a22ea19 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -190,6 +190,7 @@
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 # CONFIG_NETFILTER_XT_MATCH_SCTP is not set
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -665,4 +666,3 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index ffb5530..a228ca8 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -195,6 +195,7 @@
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 # CONFIG_NETFILTER_XT_MATCH_SCTP is not set
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -732,4 +733,3 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed3208..f52a296 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -94,7 +94,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 	((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
 #define alloc_screen_info(x...)		&screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+				    struct screen_info *si)
+{
+}
 
 /* redeclare as 'hidden' so the compiler will generate relative references */
 extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
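
Replacing the empty free_screen_info() macro with an empty static inline is a common kernel idiom: the no-op behavior is preserved, but the arguments are now evaluated and type-checked, and callers no longer trip unused-variable warnings. A reduced illustration (hypothetical demo_* names):

    struct demo_info;

    /* Macro form: arguments disappear unchecked at preprocessing time. */
    #define demo_free_macro(x...)

    /* Inline form: both arguments are still type-checked by the compiler. */
    static inline void demo_free_inline(void *table, struct demo_info *si)
    {
    }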
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f64d4e3..c9a1d5f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -448,8 +448,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				 PMD_TYPE_SECT)
 
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud)		(0)
-#define pud_table(pud)		(1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
 #else
 #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
 				 PUD_TYPE_SECT)
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 57e9622..7eff8af 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-		struct plt_entry trampoline;
+		struct plt_entry trampoline, *dst;
 		struct module *mod;
 
 		/*
@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * is added in the future, but for now, the pr_err() below
 		 * deals with a theoretical issue only.
 		 */
+		dst = mod->arch.ftrace_trampoline;
 		trampoline = get_plt_entry(addr);
-		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-				       &trampoline)) {
-			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-					       &(struct plt_entry){})) {
+		if (!plt_entries_equal(dst, &trampoline)) {
+			if (!plt_entries_equal(dst, &(struct plt_entry){})) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}
 
 			/* point the trampoline to our ftrace entry point */
 			module_disable_ro(mod);
-			*mod->arch.ftrace_trampoline = trampoline;
+			*dst = trampoline;
 			module_enable_ro(mod, true);
 
-			/* update trampoline before patching in the branch */
-			smp_wmb();
+			/*
+			 * Ensure updated trampoline is visible to instruction
+			 * fetch before we patch in the branch.
+			 */
+			__flush_icache_range((unsigned long)&dst[0],
+					     (unsigned long)&dst[1]);
 		}
-		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+		addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
 		return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 933adbc..0311fe5 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -11,6 +11,7 @@
 
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
 		return 0;
 	}
 }
+NOKPROBE_SYMBOL(save_return_addr);
 
 void *return_address(unsigned int level)
 {
@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
 		return NULL;
 }
 EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4989f7e..bb482ec 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
 	return 0;
 }
+NOKPROBE_SYMBOL(unwind_frame);
 
 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 		     int (*fn)(struct stackframe *, void *), void *data)
@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			break;
 	}
 }
+NOKPROBE_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index 7a5173e..4c2e96e 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
 	switch (spsr_idx) {
 	case KVM_SPSR_SVC:
 		write_sysreg_el1(v, spsr);
+		break;
 	case KVM_SPSR_ABT:
 		write_sysreg(v, spsr_abt);
+		break;
 	case KVM_SPSR_UND:
 		write_sysreg(v, spsr_und);
+		break;
 	case KVM_SPSR_IRQ:
 		write_sysreg(v, spsr_irq);
+		break;
 	case KVM_SPSR_FIQ:
 		write_sysreg(v, spsr_fiq);
+		break;
 	}
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d112af7..6da2bbd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -626,7 +626,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+	__vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -968,13 +968,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
-	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
+	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
-	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
+	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
-	  trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },		\
+	  trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },		\
 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
-	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
+	  trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
 
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)						\
@@ -1359,7 +1359,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
 
-	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
+	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
 	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
 	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
@@ -2072,13 +2072,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 }
 
 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_desc *table, size_t num)
+				const struct sys_reg_desc *table, size_t num,
+				unsigned long *bmap)
 {
 	unsigned long i;
 
 	for (i = 0; i < num; i++)
-		if (table[i].reset)
+		if (table[i].reset) {
+			int reg = table[i].reg;
+
 			table[i].reset(vcpu, &table[i]);
+			if (reg > 0 && reg < NR_SYS_REGS)
+				set_bit(reg, bmap);
+		}
 }
 
 /**
@@ -2576,18 +2582,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 {
 	size_t num;
 	const struct sys_reg_desc *table;
-
-	/* Catch someone adding a register without putting in reset entry. */
-	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
+	DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
 
 	/* Generic chip reset first (so target could override). */
-	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
 
 	table = get_target_table(vcpu->arch.target, true, &num);
-	reset_sys_reg_descs(vcpu, table, num);
+	reset_sys_reg_descs(vcpu, table, num, bmap);
 
 	for (num = 1; num < NR_SYS_REGS; num++) {
-		if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+		if (WARN(!test_bit(num, bmap),
 			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
 			break;
 	}
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index 97d5239..428ef21 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu)
 	if (c->tcache.waysize)
 		populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
 
+	this_cpu_ci->cpu_map_populated = true;
+
 	return 0;
 }
 
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 5f209f1..df7ddd2 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
 
 static int __init init_pit_clocksource(void)
 {
-	if (num_possible_cpus() > 1) /* PIT does not scale! */
+	if (num_possible_cpus() > 1 || /* PIT does not scale! */
+	    !clockevent_state_periodic(&i8253_clockevent))
 		return 0;
 
 	return clocksource_i8253_init();
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 262ba94..1bf6aae 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -135,7 +135,7 @@
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5		/* ensure we get enough */
 	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
-	srw.	r8,r8,r9		/* compute line count */
+	srd.	r8,r8,r9		/* compute line count */
 	beqlr				/* nothing to do? */
 	mtctr	r8
 0:	dcbst	0,r6
@@ -153,7 +153,7 @@
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5		/* ensure we get enough */
 	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
-	srw.	r8,r8,r9		/* compute line count */
+	srd.	r8,r8,r9		/* compute line count */
 	beqlr				/* nothing to do? */
 	sync
 	isync
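
The srw -> srd change matters for flush lengths of 4 GiB and larger: srw shifts only the low 32 bits of the length register, so the computed dcache line count is truncated and can even be zero, making the beqlr above return before flushing anything. A C analogue (assuming 128-byte cache lines, log2 = 7; values illustrative):

    #include <linux/types.h>

    static u64 line_count_srw(u64 len)
    {
        return (u32)len >> 7;   /* old: high 32 bits of len are lost */
    }

    static u64 line_count_srd(u64 len)
    {
        return len >> 7;        /* new: full 64-bit shift */
    }

    /*
     * len = 0x100000000 (4 GiB): line_count_srw() == 0 (loop skipped),
     * line_count_srd() == 0x2000000.
     */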
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 578174a3..51cd66d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -61,6 +61,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_runnable(vcpu);
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
 	return false;
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index dd6b05b..d911a8c 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from);
 
 static inline void __fstate_clean(struct pt_regs *regs)
 {
-	regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
 }
 
 static inline void fstate_save(struct task_struct *task,
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 41e3908..0d75329 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -176,6 +176,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
+#define ARCH_ZONE_DMA_BITS	31
+
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
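
Setting ARCH_ZONE_DMA_BITS to 31 sizes ZONE_DMA to the low 2 GiB (2^31 bytes), matching s390's 31-bit DMA addressing; without the override, kernel/dma/direct.c falls back to its 24-bit (16 MiB) default, which is far too small for s390.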
 
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b43f8d3..18ede6e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,10 +31,9 @@
 SECTIONS
 {
 	. = 0x100000;
-	_stext = .;		/* Start of text section */
 	.text : {
-		/* Text and read-only data */
-		_text = .;
+		_stext = .;		/* Start of text section */
+		_text = .;		/* Text and read-only data */
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
@@ -46,11 +45,10 @@
 		*(.text.*_indirect_*)
 		*(.fixup)
 		*(.gnu.warning)
+		. = ALIGN(PAGE_SIZE);
+		_etext = .;		/* End of text section */
 	} :text = 0x0700
 
-	. = ALIGN(PAGE_SIZE);
-	_etext = .;		/* End of text section */
-
 	NOTES :text :note
 
 	.dummy : { *(.dummy) } :data
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index d9ff3b4..2569ffc 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
 	switch (sh_type) {
 	case SH_BREAKPOINT_READ:
 		*gen_type = HW_BREAKPOINT_R;
+		break;
 	case SH_BREAKPOINT_WRITE:
 		*gen_type = HW_BREAKPOINT_W;
 		break;
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index c4428a1..2622c07 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -34,6 +34,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
 	return diff;
 }
 
+/*
+ * Clang may lower `memcmp == 0` to `bcmp == 0`.
+ */
+int bcmp(const void *s1, const void *s2, size_t len)
+{
+	return memcmp(s1, s2, len);
+}
+
 int strcmp(const char *str1, const char *str2)
 {
 	const unsigned char *s1 = (const unsigned char *)str1;
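
The bcmp() addition matters because the boot stub is freestanding: when only the zero/non-zero result of memcmp() is used, Clang's optimizer may emit a call to bcmp() instead, which would otherwise be an unresolved symbol here. A call shape that can trigger the lowering:

    /*
     * Only equality is tested, so the three-way ordering result of
     * memcmp() is unused and Clang may substitute bcmp().
     */
    static int has_magic(const void *hdr, const void *magic, size_t len)
    {
        return memcmp(hdr, magic, len) == 0;
    }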
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index a07ffd2..d3983fd 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -18,6 +18,20 @@
  * Note: efi_info is commonly left uninitialized, but that field has a
  * private magic, so it is better to leave it unchanged.
  */
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member)				\
+	{								\
+		.start = offsetof(struct boot_params, struct_member),	\
+		.len   = sizeof_mbr(struct boot_params, struct_member),	\
+	}
+
+struct boot_params_to_save {
+	unsigned int start;
+	unsigned int len;
+};
+
 static void sanitize_boot_params(struct boot_params *boot_params)
 {
 	/* 
@@ -36,19 +50,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 	 */
 	if (boot_params->sentinel) {
 		/* fields in boot_params are left uninitialized, clear them */
-		memset(&boot_params->ext_ramdisk_image, 0,
-		       (char *)&boot_params->efi_info -
-			(char *)&boot_params->ext_ramdisk_image);
-		memset(&boot_params->kbd_status, 0,
-		       (char *)&boot_params->hdr -
-		       (char *)&boot_params->kbd_status);
-		memset(&boot_params->_pad7[0], 0,
-		       (char *)&boot_params->edd_mbr_sig_buffer[0] -
-			(char *)&boot_params->_pad7[0]);
-		memset(&boot_params->_pad8[0], 0,
-		       (char *)&boot_params->eddbuf[0] -
-			(char *)&boot_params->_pad8[0]);
-		memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+		static struct boot_params scratch;
+		char *bp_base = (char *)boot_params;
+		char *save_base = (char *)&scratch;
+		int i;
+
+		const struct boot_params_to_save to_save[] = {
+			BOOT_PARAM_PRESERVE(screen_info),
+			BOOT_PARAM_PRESERVE(apm_bios_info),
+			BOOT_PARAM_PRESERVE(tboot_addr),
+			BOOT_PARAM_PRESERVE(ist_info),
+			BOOT_PARAM_PRESERVE(hd0_info),
+			BOOT_PARAM_PRESERVE(hd1_info),
+			BOOT_PARAM_PRESERVE(sys_desc_table),
+			BOOT_PARAM_PRESERVE(olpc_ofw_header),
+			BOOT_PARAM_PRESERVE(efi_info),
+			BOOT_PARAM_PRESERVE(alt_mem_k),
+			BOOT_PARAM_PRESERVE(scratch),
+			BOOT_PARAM_PRESERVE(e820_entries),
+			BOOT_PARAM_PRESERVE(eddbuf_entries),
+			BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+			BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+			BOOT_PARAM_PRESERVE(hdr),
+			BOOT_PARAM_PRESERVE(e820_table),
+			BOOT_PARAM_PRESERVE(eddbuf),
+		};
+
+		memset(&scratch, 0, sizeof(scratch));
+
+		for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+			memcpy(save_base + to_save[i].start,
+			       bp_base + to_save[i].start, to_save[i].len);
+		}
+
+		memcpy(boot_params, save_base, sizeof(*boot_params));
 	}
 }
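
The rewrite inverts the old scheme: rather than memset()ing the gaps between known-good fields (which silently broke whenever struct boot_params gained a member), it zeroes a scratch struct, copies in only the whitelisted fields, and copies the scratch back, so every unlisted byte is cleared by construction. The same shape reduced to essentials, reusing boot_params_to_save and sizeof_mbr from above (demo_* names hypothetical):

    struct demo_params { int keep_a; int junk; int keep_b; };

    static void demo_sanitize(struct demo_params *p)
    {
        static const struct boot_params_to_save keep[] = {
            { offsetof(struct demo_params, keep_a),
              sizeof_mbr(struct demo_params, keep_a) },
            { offsetof(struct demo_params, keep_b),
              sizeof_mbr(struct demo_params, keep_b) },
        };
        struct demo_params scratch = { 0 };
        int i;

        for (i = 0; i < ARRAY_SIZE(keep); i++)
            memcpy((char *)&scratch + keep[i].start,
                   (char *)p + keep[i].start, keep[i].len);

        *p = scratch;   /* 'junk' is now guaranteed to be zero */
    }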
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2877e1fba..3245b95 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1113,6 +1113,7 @@ struct kvm_x86_ops {
 	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
 			      uint32_t guest_irq, bool set);
 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
 
 	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index f85f43d..a1d22e4 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -334,6 +334,7 @@
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
 #define MSR_AMD64_TSC_RATIO		0xc0000104
 #define MSR_AMD64_NB_CFG		0xc001001f
+#define MSR_AMD64_CPUID_FN_1		0xc0011004
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 599c273..28cb2b3 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -202,7 +202,7 @@
 	"    	lfence;\n"					\
 	"       jmp    902b;\n"					\
 	"       .align 16\n"					\
-	"903:	addl   $4, %%esp;\n"				\
+	"903:	lea    4(%%esp), %%esp;\n"			\
 	"       pushl  %[thunk_target];\n"			\
 	"       ret;\n"						\
 	"       .align 16\n"					\
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 272a128..b316bd6 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -715,7 +715,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
 
 /*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
  */
 static void __init lapic_cal_handler(struct clock_event_device *dev)
 {
@@ -799,7 +799,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 static int __init calibrate_APIC_clock(void)
 {
 	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
-	void (*real_handler)(struct clock_event_device *dev);
+	u64 tsc_perj = 0, tsc_start = 0;
+	unsigned long jif_start;
 	unsigned long deltaj;
 	long delta, deltatsc;
 	int pm_referenced = 0;
@@ -830,29 +831,65 @@ static int __init calibrate_APIC_clock(void)
 	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
 		    "calibrating APIC timer ...\n");
 
+	/*
+	 * There are platforms without global clockevent devices. Instead of
+	 * making the calibration conditional on that, use a polling-based
+	 * approach everywhere.
+	 */
 	local_irq_disable();
 
-	/* Replace the global interrupt handler */
-	real_handler = global_clock_event->event_handler;
-	global_clock_event->event_handler = lapic_cal_handler;
-
 	/*
 	 * Setup the APIC counter to maximum. There is no way the lapic
 	 * can underflow in the 100ms detection time frame
 	 */
 	__setup_APIC_LVTT(0xffffffff, 0, 0);
 
-	/* Let the interrupts run */
+	/*
+	 * Methods to terminate the calibration loop:
+	 *  1) Global clockevent if available (jiffies)
+	 *  2) TSC if available and frequency is known
+	 */
+	jif_start = READ_ONCE(jiffies);
+
+	if (tsc_khz) {
+		tsc_start = rdtsc();
+		tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+	}
+
+	/*
+	 * Enable interrupts so the tick can fire, if a global
+	 * clockevent device is available
+	 */
 	local_irq_enable();
 
-	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-		cpu_relax();
+	while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+		/* Wait for a tick to elapse */
+		while (1) {
+			if (tsc_khz) {
+				u64 tsc_now = rdtsc();
+				if ((tsc_now - tsc_start) >= tsc_perj) {
+					tsc_start += tsc_perj;
+					break;
+				}
+			} else {
+				unsigned long jif_now = READ_ONCE(jiffies);
+
+				if (time_after(jif_now, jif_start)) {
+					jif_start = jif_now;
+					break;
+				}
+			}
+			cpu_relax();
+		}
+
+		/* Invoke the calibration routine */
+		local_irq_disable();
+		lapic_cal_handler(NULL);
+		local_irq_enable();
+	}
 
 	local_irq_disable();
 
-	/* Restore the real event handler */
-	global_clock_event->event_handler = real_handler;
-
 	/* Build delta t1-t2 as apic timer counts down */
 	delta = lapic_cal_t1 - lapic_cal_t2;
 	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
@@ -904,10 +941,11 @@ static int __init calibrate_APIC_clock(void)
 	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
 
 	/*
-	 * PM timer calibration failed or not turned on
-	 * so lets try APIC timer based calibration
+	 * PM timer calibration failed or not turned on, so let's try APIC
+	 * timer based calibration, if a global clockevent device is
+	 * available.
 	 */
-	if (!pm_referenced) {
+	if (!pm_referenced && global_clock_event) {
 		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
 
 		/*
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index da1f5e7..f86f912 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -799,6 +799,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
 	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 }
 
+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "force"))
+		rdrand_force = true;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Saving of the MSR used to hide the RDRAND support during
+	 * suspend/resume is done by arch/x86/power/cpu.c, which is
+	 * dependent on CONFIG_PM_SLEEP.
+	 */
+	if (!IS_ENABLED(CONFIG_PM_SLEEP))
+		return;
+
+	/*
+	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+	 * RDRAND support using the CPUID function directly.
+	 */
+	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+		return;
+
+	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+	/*
+	 * Verify that the CPUID change has occurred in case the kernel is
+	 * running virtualized and the hypervisor doesn't support the MSR.
+	 */
+	if (cpuid_ecx(1) & BIT(30)) {
+		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+		return;
+	}
+
+	clear_cpu_cap(c, X86_FEATURE_RDRAND);
+	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Some BIOS implementations do not restore proper RDRAND support
+	 * across suspend and resume. Check whether to hide the RDRAND
+	 * instruction support via CPUID.
+	 */
+	clear_rdrand_cpuid_bit(c);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
 	u64 value;
@@ -813,6 +871,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 		}
 	}
+
+	/*
+	 * Some BIOS implementations do not restore proper RDRAND support
+	 * across suspend and resume. Check whether to hide the RDRAND
+	 * instruction support via CPUID.
+	 */
+	clear_rdrand_cpuid_bit(c);
 }
 
 static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -855,6 +920,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 	case 0x10: init_amd_gh(c); break;
 	case 0x12: init_amd_ln(c); break;
 	case 0x15: init_amd_bd(c); break;
+	case 0x16: init_amd_jg(c); break;
 	case 0x17: init_amd_zn(c); break;
 	}
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ea454d3..0f33f00 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5146,6 +5146,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 		kvm_vcpu_wake_up(vcpu);
 }
 
+static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 {
 	unsigned long flags;
@@ -7203,6 +7208,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
 	.pmu_ops = &amd_pmu_ops,
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
+	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4cf16378..2e310ea 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10411,6 +10411,11 @@ static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
 	return ((rvi & 0xf0) > (vppr & 0xf0));
 }
 
+static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return pi_test_on(vcpu_to_pi_desc(vcpu));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))
@@ -14387,6 +14392,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
 	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+	.dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cea6568..e10a7a4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9336,6 +9336,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+		return true;
+
+	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+		kvm_test_request(KVM_REQ_SMI, vcpu) ||
+		 kvm_test_request(KVM_REQ_EVENT, vcpu))
+		return true;
+
+	if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.preempted_in_kernel;
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index 2dd1fe13..19f7079 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -1,5 +1,6 @@
 #include <linux/types.h>
 #include <linux/export.h>
+#include <asm/cpu.h>
 
 unsigned int x86_family(unsigned int sig)
 {
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d9765e..1bcb724 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -261,13 +261,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
+
+	if (pmd_present(*pmd) != pmd_present(*pmd_k))
+		set_pmd(pmd, *pmd_k);
+
 	if (!pmd_present(*pmd_k))
 		return NULL;
-
-	if (!pmd_present(*pmd))
-		set_pmd(pmd, *pmd_k);
 	else
-		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
 	return pmd_k;
 }
@@ -287,17 +288,13 @@ void vmalloc_sync_all(void)
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
-			pmd_t *ret;
 
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			ret = vmalloc_sync_one(page_address(page), address);
+			vmalloc_sync_one(page_address(page), address);
 			spin_unlock(pgt_lock);
-
-			if (!ret)
-				break;
 		}
 		spin_unlock(&pgd_lock);
 	}
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 513ce09..3aa3149 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/perf_event.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>
 
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -24,7 +25,7 @@
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
-#include <linux/dmi.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void)
 
 core_initcall(bsp_pm_check_init);
 
-static int msr_init_context(const u32 *msr_id, const int total_num)
+static int msr_build_context(const u32 *msr_id, const int num)
 {
-	int i = 0;
+	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
 	struct saved_msr *msr_array;
+	int total_num;
+	int i, j;
 
-	if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
-		pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
-		return -EINVAL;
-	}
+	total_num = saved_msrs->num + num;
 
 	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
 	if (!msr_array) {
@@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < total_num; i++) {
-		msr_array[i].info.msr_no	= msr_id[i];
+	if (saved_msrs->array) {
+		/*
+		 * Multiple callbacks can invoke this function, so copy any
+		 * MSR save requests from previous invocations.
+		 */
+		memcpy(msr_array, saved_msrs->array,
+		       sizeof(struct saved_msr) * saved_msrs->num);
+
+		kfree(saved_msrs->array);
+	}
+
+	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+		msr_array[i].info.msr_no	= msr_id[j];
 		msr_array[i].valid		= false;
 		msr_array[i].info.reg.q		= 0;
 	}
-	saved_context.saved_msrs.num	= total_num;
-	saved_context.saved_msrs.array	= msr_array;
+	saved_msrs->num   = total_num;
+	saved_msrs->array = msr_array;
 
 	return 0;
 }
 
 /*
- * The following section is a quirk framework for problematic BIOSen:
+ * The following sections are a quirk framework for problematic BIOSen:
  * Sometimes MSRs are modified by the BIOSen after suspended to
  * RAM, this might cause unexpected behavior after wakeup.
  * Thus we save/restore these specified MSRs across suspend/resume
@@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
 	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
 
 	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
-	return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
 }
 
 static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
 	{}
 };
 
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+	u32 cpuid_msr_id[] = {
+		MSR_AMD64_CPUID_FN_1,
+	};
+
+	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+		c->family);
+
+	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+	{
+		.vendor = X86_VENDOR_AMD,
+		.family = 0x15,
+		.model = X86_MODEL_ANY,
+		.feature = X86_FEATURE_ANY,
+		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+	},
+	{
+		.vendor = X86_VENDOR_AMD,
+		.family = 0x16,
+		.model = X86_MODEL_ANY,
+		.feature = X86_FEATURE_ANY,
+		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+	},
+	{}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+	const struct x86_cpu_id *m;
+	int ret = 0;
+
+	m = x86_match_cpu(msr_save_cpu_table);
+	if (m) {
+		pm_cpu_match_t fn;
+
+		fn = (pm_cpu_match_t)m->driver_data;
+		ret = fn(m);
+	}
+
+	return ret;
+}
+
 static int pm_check_save_msr(void)
 {
 	dmi_check_system(msr_save_dmi_table);
+	pm_cpu_check(msr_save_cpu_table);
+
 	return 0;
 }
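
msr_build_context() replaces msr_init_context(), which bailed out if invoked twice; with both the DMI table and the new CPU-id table able to request MSR saves, the array must grow instead. The pattern: allocate room for old + new entries, copy the existing entries across, free the old buffer, then append. In isolation (hypothetical names, usual kernel headers assumed):

    static int demo_append_msrs(u32 **ids, int *num, const u32 *add, int n)
    {
        u32 *tmp = kmalloc_array(*num + n, sizeof(*tmp), GFP_KERNEL);
        int i;

        if (!tmp)
            return -ENOMEM;
        if (*ids) {
            /* Preserve requests from earlier invocations. */
            memcpy(tmp, *ids, *num * sizeof(*tmp));
            kfree(*ids);
        }
        for (i = 0; i < n; i++)
            tmp[*num + i] = add[i];
        *ids = tmp;
        *num += n;
        return 0;
    }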
 
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 3cf302b..8901a1f 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -6,6 +6,9 @@
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
+$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
+	$(call if_changed_rule,cc_o_c)
+
 $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
 	$(call if_changed_rule,cc_o_c)
 
@@ -17,11 +20,34 @@
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
-# sure how to relocate those. Like kexec-tools, use custom flags.
+# sure how to relocate those.
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_sha256.o		+= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_purgatory.o	+= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_string.o		+= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_kexec-purgatory.o	+= $(CC_FLAGS_FTRACE)
+endif
 
-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
-KBUILD_CFLAGS += -m$(BITS)
-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ifdef CONFIG_STACKPROTECTOR
+CFLAGS_REMOVE_sha256.o		+= -fstack-protector
+CFLAGS_REMOVE_purgatory.o	+= -fstack-protector
+CFLAGS_REMOVE_string.o		+= -fstack-protector
+CFLAGS_REMOVE_kexec-purgatory.o	+= -fstack-protector
+endif
+
+ifdef CONFIG_STACKPROTECTOR_STRONG
+CFLAGS_REMOVE_sha256.o		+= -fstack-protector-strong
+CFLAGS_REMOVE_purgatory.o	+= -fstack-protector-strong
+CFLAGS_REMOVE_string.o		+= -fstack-protector-strong
+CFLAGS_REMOVE_kexec-purgatory.o	+= -fstack-protector-strong
+endif
+
+ifdef CONFIG_RETPOLINE
+CFLAGS_REMOVE_sha256.o		+= $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_purgatory.o	+= $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_string.o		+= $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_kexec-purgatory.o	+= $(RETPOLINE_CFLAGS)
+endif
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 		$(call if_changed,ld)
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 025c34a..7971f7a 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -70,3 +70,9 @@ void purgatory(void)
 	}
 	copy_backup_region();
 }
+
+/*
+ * Defined in order to reuse memcpy() and memset() from
+ * arch/x86/boot/compressed/string.c
+ */
+void warn(const char *msg) {}
diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
deleted file mode 100644
index 795ca4f..0000000
--- a/arch/x86/purgatory/string.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Simple string functions.
- *
- * Copyright (C) 2014 Red Hat Inc.
- *
- * Author:
- *       Vivek Goyal <vgoyal@redhat.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2.  See the file COPYING for more details.
- */
-
-#include <linux/types.h>
-
-#include "../boot/string.c"
-
-void *memcpy(void *dst, const void *src, size_t len)
-{
-	return __builtin_memcpy(dst, src, len);
-}
-
-void *memset(void *dst, int c, size_t len)
-{
-	return __builtin_memset(dst, c, len);
-}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index a285fbd..15580e4 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -515,6 +515,7 @@ void cpu_reset(void)
 				      "add	%2, %2, %7\n\t"
 				      "addi	%0, %0, -1\n\t"
 				      "bnez	%0, 1b\n\t"
+				      "isync\n\t"
 				      /* Jump to identity mapping */
 				      "jx	%3\n"
 				      "2:\n\t"
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index becd793..d8d2ac2 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1886,9 +1886,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
 	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
 				    struct request, rb_node))) {
 		struct bfq_queue *bfqq = bfq_init_rq(req);
-		struct bfq_data *bfqd = bfqq->bfqd;
+		struct bfq_data *bfqd;
 		struct request *prev, *next_rq;
 
+		if (!bfqq)
+			return;
+
+		bfqd = bfqq->bfqd;
+
 		/* Reposition request in its sort_list */
 		elv_rb_del(&bfqq->sort_list, req);
 		elv_rb_add(&bfqq->sort_list, req);
@@ -1930,6 +1935,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
 	struct bfq_queue *bfqq = bfq_init_rq(rq),
 		*next_bfqq = bfq_init_rq(next);
 
+	if (!bfqq)
+		return;
+
 	/*
 	 * If next and rq belong to the same bfq_queue and next is older
 	 * than rq, then reposition rq in the fifo (by substituting next
@@ -4590,12 +4598,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	spin_lock_irq(&bfqd->lock);
 	bfqq = bfq_init_rq(rq);
-	if (at_head || blk_rq_is_passthrough(rq)) {
+	if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
 		if (at_head)
 			list_add(&rq->queuelist, &bfqd->dispatch);
 		else
 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
-	} else { /* bfqq is assumed to be non null here */
+	} else {
 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
 		/*
 		 * Update bfqq, because, if a queue merge has occurred
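
For illustration, a standalone sketch (a simplified model, not the bfq API; lookup_queue() is a hypothetical stand-in for bfq_init_rq()) of the guard pattern these hunks apply: the lookup may legitimately return NULL, so callers must route the request to a fallback path instead of dereferencing the result.

/* null_guard.c — standalone sketch; compile with: gcc null_guard.c */
#include <stdio.h>

struct queue {
	int id;
};

static struct queue *lookup_queue(int have_queue)
{
	static struct queue q = { 42 };

	return have_queue ? &q : NULL;	/* NULL is a valid outcome */
}

static void insert_request(int have_queue)
{
	struct queue *q = lookup_queue(have_queue);

	if (!q) {	/* the guard added by the hunks above */
		printf("no queue: dispatch directly\n");
		return;
	}
	printf("insert into queue %d\n", q->id);
}

int main(void)
{
	insert_request(1);
	insert_request(0);
	return 0;
}
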
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 43c2615..e11b5da 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -616,8 +616,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
 
 	/* Move to ITS specific data */
 	its = (struct acpi_iort_its_group *)node->node_data;
-	if (idx > its->its_count) {
-		dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+	if (idx >= its->its_count) {
+		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
 			idx, its->its_count);
 		return -ENXIO;
 	}
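
The fix above is the classic off-by-one in index validation: valid indices into an array of count entries run 0..count-1, so the rejection test must be `idx >= count`, not `idx > count`. A standalone sketch (function and variable names are my own):

/* off_by_one.c — compile with: gcc off_by_one.c */
#include <stdio.h>

static int lookup(const int *ids, unsigned int count, unsigned int idx)
{
	if (idx >= count) {	/* `>` here would let idx == count through */
		fprintf(stderr, "index %u overruns %u entries\n", idx, count);
		return -1;
	}
	return ids[idx];
}

int main(void)
{
	int its_ids[3] = { 10, 11, 12 };

	printf("%d\n", lookup(its_ids, 3, 2));	/* ok: last valid entry */
	printf("%d\n", lookup(its_ids, 3, 3));	/* rejected: one past the end */
	return 0;
}
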
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index c92c10d..5bece97 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -313,6 +313,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
 		hpriv->phys[port] = NULL;
 		rc = 0;
 		break;
+	case -EPROBE_DEFER:
+		/* Do not complain yet */
+		break;
 
 	default:
 		dev_err(dev,
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1984fc7..3a64fa4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1803,6 +1803,21 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
 	return 1;
 }
 
+static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
+{
+	struct request *rq = scmd->request;
+	u32 req_blocks;
+
+	if (!blk_rq_is_passthrough(rq))
+		return true;
+
+	req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
+	if (n_blocks > req_blocks)
+		return false;
+
+	return true;
+}
+
 /**
  *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
  *	@qc: Storage for translated ATA taskfile
@@ -1847,6 +1862,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		scsi_10_lba_len(cdb, &block, &n_block);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	case READ_6:
 	case WRITE_6:
@@ -1861,6 +1878,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		 */
 		if (!n_block)
 			n_block = 256;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	case READ_16:
 	case WRITE_16:
@@ -1871,6 +1890,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		scsi_16_lba_len(cdb, &block, &n_block);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	default:
 		DPRINTK("no-byte command\n");
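
A userspace sketch (my simplified model, not the kernel API) of the validation ata_check_nblocks() adds above: for a passthrough request, the block count encoded in the CDB must not exceed what the request's allocated byte count can back, or the device would transfer into memory the block layer never provided.

/* nblocks_check.c — compile with: gcc nblocks_check.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool check_nblocks(uint32_t rq_bytes, uint32_t sector_size,
			  uint32_t n_blocks_from_cdb)
{
	uint32_t req_blocks = rq_bytes / sector_size;

	return n_blocks_from_cdb <= req_blocks;
}

int main(void)
{
	/* 8 sectors of buffer, CDB asks for 8: fine */
	printf("%d\n", check_nblocks(8 * 512, 512, 8));
	/* 8 sectors of buffer, CDB asks for 16: rejected */
	printf("%d\n", check_nblocks(8 * 512, 512, 16));
	return 0;
}
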
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index c5ea0fc..873cc09 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -674,6 +674,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	unsigned int offset;
 	unsigned char *buf;
 
+	if (!qc->cursg) {
+		qc->curbytes = qc->nbytes;
+		return;
+	}
 	if (qc->curbytes == qc->nbytes - qc->sect_size)
 		ap->hsm_task_state = HSM_ST_LAST;
 
@@ -699,6 +703,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
 	if (qc->cursg_ofs == qc->cursg->length) {
 		qc->cursg = sg_next(qc->cursg);
+		if (!qc->cursg)
+			ap->hsm_task_state = HSM_ST_LAST;
 		qc->cursg_ofs = 0;
 	}
 }
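
A standalone model (not the libata HSM itself; structure and names are simplified stand-ins) of the exhaustion guards added above: a per-sector state machine must enter its "last" state once the scatterlist runs out, rather than dereferencing a NULL element on the next sector.

/* sg_walk.c — compile with: gcc sg_walk.c */
#include <stddef.h>
#include <stdio.h>

struct sg {
	struct sg *next;
	unsigned int length;
};

int main(void)
{
	struct sg b = { NULL, 512 };
	struct sg a = { &b, 512 };
	struct sg *cursg = &a;
	unsigned int cursg_ofs = 0;
	int sector = 0;

	while (cursg) {			/* the guard: stop when exhausted */
		cursg_ofs += 512;	/* transfer one sector */
		printf("sector %d done\n", sector++);
		if (cursg_ofs == cursg->length) {
			cursg = cursg->next;	/* may be NULL */
			cursg_ofs = 0;
		}
	}
	printf("transfer complete\n");
	return 0;
}
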
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2..eefda51 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 	unsigned int ret;
 	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
-	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+	static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 81f9bd6..8ebe99b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5241,7 +5241,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	unsigned int key_len;
 	char secret[SHARED_SECRET_MAX]; /* 64 byte */
 	unsigned int resp_size;
-	SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
+	struct shash_desc *desc;
 	struct packet_info pi;
 	struct net_conf *nc;
 	int err, rv;
@@ -5254,6 +5254,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	memcpy(secret, nc->shared_secret, key_len);
 	rcu_read_unlock();
 
+	desc = kmalloc(sizeof(struct shash_desc) +
+		       crypto_shash_descsize(connection->cram_hmac_tfm),
+		       GFP_KERNEL);
+	if (!desc) {
+		rv = -1;
+		goto fail;
+	}
 	desc->tfm = connection->cram_hmac_tfm;
 	desc->flags = 0;
 
@@ -5396,7 +5403,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	kfree(peers_ch);
 	kfree(response);
 	kfree(right_response);
-	shash_desc_zero(desc);
+	if (desc) {
+		shash_desc_zero(desc);
+		kfree(desc);
+	}
 
 	return rv;
 }
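
The drbd change above replaces an on-stack descriptor, whose size depends on the transform and is only known at run time, with a heap allocation. A standalone sketch of that allocation pattern (shash_desc_like is a hypothetical stand-in; in the kernel the context size comes from crypto_shash_descsize()):

/* desc_alloc.c — compile with: gcc desc_alloc.c */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct shash_desc_like {
	void *tfm;		/* stand-in for the crypto transform pointer */
	unsigned char ctx[];	/* runtime-sized context follows the header */
};

int main(void)
{
	size_t ctx_size = 64;	/* in the kernel: crypto_shash_descsize(tfm) */
	struct shash_desc_like *desc;

	desc = malloc(sizeof(*desc) + ctx_size);
	if (!desc)
		return 1;

	memset(desc->ctx, 0, ctx_size);		/* ... use the descriptor ... */

	memset(desc, 0, sizeof(*desc) + ctx_size);	/* zero like shash_desc_zero() */
	free(desc);
	return 0;
}
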
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f1e63eb..cef8e00 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -886,7 +886,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
 
 static int loop_kthread_worker_fn(void *worker_ptr)
 {
-	current->flags |= PF_LESS_THROTTLE;
+	current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
 	return kthread_worker_fn(worker_ptr);
 }
 
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 6f88d91..16ac408 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -14,6 +14,14 @@
 #include <linux/mhi.h>
 #include "mhi_internal.h"
 
+const char * const mhi_log_level_str[MHI_MSG_LVL_MAX] = {
+	[MHI_MSG_LVL_VERBOSE] = "Verbose",
+	[MHI_MSG_LVL_INFO] = "Info",
+	[MHI_MSG_LVL_ERROR] = "Error",
+	[MHI_MSG_LVL_CRITICAL] = "Critical",
+	[MHI_MSG_LVL_MASK_ALL] = "Mask all",
+};
+
 const char * const mhi_ee_str[MHI_EE_MAX] = {
 	[MHI_EE_PBL] = "PBL",
 	[MHI_EE_SBL] = "SBL",
@@ -58,6 +66,7 @@ static const char * const mhi_pm_state_str[] = {
 	[MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process",
 	[MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
 	[MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+	[MHI_PM_BIT_SHUTDOWN_NO_ACCESS] = "SHUTDOWN No Access",
 };
 
 struct mhi_bus mhi_bus;
@@ -72,6 +81,38 @@ const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
 	return mhi_pm_state_str[index];
 }
 
+static ssize_t log_level_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			TO_MHI_LOG_LEVEL_STR(mhi_cntrl->log_lvl));
+}
+
+static ssize_t log_level_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf,
+			       size_t count)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	enum MHI_DEBUG_LEVEL log_level;
+
+	if (kstrtou32(buf, 0, &log_level) < 0)
+		return -EINVAL;
+
+	mhi_cntrl->log_lvl = log_level;
+
+	MHI_LOG("IPC log level changed to: %s\n",
+		TO_MHI_LOG_LEVEL_STR(log_level));
+
+	return count;
+}
+static DEVICE_ATTR_RW(log_level);
+
 static ssize_t bus_vote_show(struct device *dev,
 			     struct device_attribute *attr,
 			     char *buf)
@@ -130,27 +171,28 @@ static ssize_t device_vote_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(device_vote);
 
-static struct attribute *mhi_vote_attrs[] = {
+static struct attribute *mhi_sysfs_attrs[] = {
+	&dev_attr_log_level.attr,
 	&dev_attr_bus_vote.attr,
 	&dev_attr_device_vote.attr,
 	NULL,
 };
 
-static const struct attribute_group mhi_vote_group = {
-	.attrs = mhi_vote_attrs,
+static const struct attribute_group mhi_sysfs_group = {
+	.attrs = mhi_sysfs_attrs,
 };
 
-int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl)
+int mhi_create_sysfs(struct mhi_controller *mhi_cntrl)
 {
 	return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
-				  &mhi_vote_group);
+				  &mhi_sysfs_group);
 }
 
-void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl)
+void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
 
-	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_vote_group);
+	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_sysfs_group);
 
 	/* relinquish any pending votes for device */
 	while (atomic_read(&mhi_dev->dev_vote))
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index 84a9626..735e4152 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -441,6 +441,11 @@ extern const char * const mhi_state_str[MHI_STATE_MAX];
 				  !mhi_state_str[state]) ? \
 				"INVALID_STATE" : mhi_state_str[state])
 
+extern const char * const mhi_log_level_str[MHI_MSG_LVL_MAX];
+#define TO_MHI_LOG_LEVEL_STR(level) ((level >= MHI_MSG_LVL_MAX || \
+				  !mhi_log_level_str[level]) ? \
+				"Mask all" : mhi_log_level_str[level])
+
 enum {
 	MHI_PM_BIT_DISABLE,
 	MHI_PM_BIT_POR,
@@ -454,6 +459,7 @@ enum {
 	MHI_PM_BIT_SYS_ERR_PROCESS,
 	MHI_PM_BIT_SHUTDOWN_PROCESS,
 	MHI_PM_BIT_LD_ERR_FATAL_DETECT,
+	MHI_PM_BIT_SHUTDOWN_NO_ACCESS,
 	MHI_PM_BIT_MAX
 };
 
@@ -473,6 +479,7 @@ enum MHI_PM_STATE {
 	MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS),
 	/* link not accessible */
 	MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT),
+	MHI_PM_SHUTDOWN_NO_ACCESS = BIT(MHI_PM_BIT_SHUTDOWN_NO_ACCESS),
 };
 
 #define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
@@ -480,7 +487,7 @@ enum MHI_PM_STATE {
 		MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
 		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
 #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
-#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state >= MHI_PM_LD_ERR_FATAL_DETECT)
 #define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
 					mhi_cntrl->db_access)
 #define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
@@ -801,8 +808,8 @@ void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
 int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
 int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
 void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
-int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl);
-void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl);
+int mhi_create_sysfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl);
 int mhi_early_notify_device(struct device *dev, void *data);
 
 /* timesync log support */
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index b951f4e..21bd70c9 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -34,9 +34,11 @@
  *     M0 -> FW_DL_ERR
  *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
  * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
- * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+ *     SHUTDOWN_PROCESS -> DISABLE
  * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
- *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ *     LD_ERR_FATAL_DETECT -> SHUTDOWN_NO_ACCESS
+ *     SHUTDOWN_NO_ACCESS -> DISABLE
  */
 static struct mhi_pm_transitions const mhi_state_transitions[] = {
 	/* L0 States */
@@ -48,49 +50,52 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
 		MHI_PM_POR,
 		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
 		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M0,
 		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
 		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M2,
 		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3_ENTER,
 		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3,
 		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3_EXIT,
 		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_FW_DL_ERR,
 		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	/* L1 States */
 	{
 		MHI_PM_SYS_ERR_DETECT,
 		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_SYS_ERR_PROCESS,
 		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	/* L2 States */
 	{
@@ -100,7 +105,11 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
 	/* L3 States */
 	{
 		MHI_PM_LD_ERR_FATAL_DETECT,
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
+	},
+	{
+		MHI_PM_SHUTDOWN_NO_ACCESS,
+		MHI_PM_DISABLE
 	},
 };
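
For readers unfamiliar with this table's shape, a standalone sketch (state names and try_transition() are hypothetical stand-ins, not the MHI API) of how such a table is consulted: each entry pairs a "from" state with a bitmask of legal "to" states, and a move is allowed only if the target's bit is set.

/* pm_transition.c — compile with: gcc pm_transition.c */
#include <stdio.h>

enum { S_DISABLE = 1 << 0, S_POR = 1 << 1, S_M0 = 1 << 2, S_SHUTDOWN = 1 << 3 };

struct transition {
	unsigned int from, to_mask;
};

static const struct transition table[] = {
	{ S_POR,      S_POR | S_DISABLE | S_M0 | S_SHUTDOWN },
	{ S_M0,       S_M0 | S_SHUTDOWN },
	{ S_SHUTDOWN, S_DISABLE },
};

static int try_transition(unsigned int cur, unsigned int next)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].from == cur)
			return (table[i].to_mask & next) ? 0 : -1;
	return -1;
}

int main(void)
{
	printf("%d\n", try_transition(S_M0, S_SHUTDOWN));	/* 0: allowed */
	printf("%d\n", try_transition(S_SHUTDOWN, S_M0));	/* -1: rejected */
	return 0;
}
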
 
@@ -492,7 +501,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 	mhi_create_devices(mhi_cntrl);
 
 	/* setup sysfs nodes for userspace votes */
-	mhi_create_vote_sysfs(mhi_cntrl);
+	mhi_create_sysfs(mhi_cntrl);
 
 	read_lock_bh(&mhi_cntrl->pm_lock);
 
@@ -602,7 +611,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	MHI_LOG("Finish resetting channels\n");
 
 	/* remove support for userspace votes */
-	mhi_destroy_vote_sysfs(mhi_cntrl);
+	mhi_destroy_sysfs(mhi_cntrl);
 
 	MHI_LOG("Waiting for all pending threads to complete\n");
 	wake_up_all(&mhi_cntrl->state_event);
@@ -959,6 +968,7 @@ EXPORT_SYMBOL(mhi_control_error);
 void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 {
 	enum MHI_PM_STATE cur_state;
+	enum MHI_PM_STATE transition_state = MHI_PM_SHUTDOWN_PROCESS;
 
 	/* if it's not graceful shutdown, force MHI to a linkdown state */
 	if (!graceful) {
@@ -972,8 +982,10 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 			MHI_ERR("Failed to move to state:%s from:%s\n",
 				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
 				to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+		transition_state = MHI_PM_SHUTDOWN_NO_ACCESS;
 	}
-	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+	mhi_pm_disable_transition(mhi_cntrl, transition_state);
 
 	mhi_deinit_debugfs(mhi_cntrl);
 
@@ -1095,10 +1107,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
 
 	/* notify any clients we enter lpm */
 	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
-		mutex_lock(&itr->mutex);
 		if (itr->mhi_dev)
 			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
-		mutex_unlock(&itr->mutex);
 	}
 
 	return 0;
@@ -1201,10 +1211,8 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
 
 	/* notify any clients we enter lpm */
 	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
-		mutex_lock(&itr->mutex);
 		if (itr->mhi_dev)
 			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
-		mutex_unlock(&itr->mutex);
 	}
 
 	return 0;
@@ -1240,10 +1248,8 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
 
 	/* notify any clients we enter lpm */
 	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
-		mutex_lock(&itr->mutex);
 		if (itr->mhi_dev)
 			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
-		mutex_unlock(&itr->mutex);
 	}
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
@@ -1316,10 +1322,8 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
 	if (notify_client) {
 		list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans,
 					 node) {
-			mutex_lock(&itr->mutex);
 			if (itr->mhi_dev)
 				mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
-			mutex_unlock(&itr->mutex);
 		}
 	}
 
@@ -1335,6 +1339,7 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
 	switch (mhi_cntrl->pm_state) {
 	case MHI_PM_M0:
 		mhi_pm_m0_transition(mhi_cntrl);
+		break;
 	case MHI_PM_M2:
 		read_lock_bh(&mhi_cntrl->pm_lock);
 		mhi_cntrl->wake_get(mhi_cntrl, true);
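
The single added `break;` above fixes an unintended switch fall-through: without it, the M0 case would run and then continue into the M2 case. A standalone illustration (state names simplified):

/* fallthrough.c — compile with: gcc fallthrough.c */
#include <stdio.h>

enum pm_state { PM_M0, PM_M2 };

static void handle(enum pm_state s, int fixed)
{
	switch (s) {
	case PM_M0:
		printf("M0 transition\n");
		if (fixed)
			break;		/* the fix: stop here */
		/* fall through */
	case PM_M2:
		printf("M2 wake/ring sequence\n");
		break;
	}
}

int main(void)
{
	handle(PM_M0, 0);	/* buggy: prints both lines */
	handle(PM_M0, 1);	/* fixed: prints only "M0 transition" */
	return 0;
}
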
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index 772964a..2e6dcde 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -866,6 +866,22 @@ static const struct file_operations debugfs_stats = {
 	.read = seq_read,
 };
 
+static int mhi_netdev_debugfs_chain(void *data, u64 val)
+{
+	struct mhi_netdev *mhi_netdev = data;
+	struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
+
+	mhi_netdev->chain = NULL;
+
+	if (rsc_dev)
+		rsc_dev->chain = NULL;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(debugfs_chain, NULL,
+			 mhi_netdev_debugfs_chain, "%llu\n");
+
 static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
 {
 	char node_name[32];
@@ -885,6 +901,8 @@ static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
 
 	debugfs_create_file_unsafe("stats", 0444, mhi_netdev->dentry,
 				   mhi_netdev, &debugfs_stats);
+	debugfs_create_file_unsafe("chain", 0444, mhi_netdev->dentry,
+				   mhi_netdev, &debugfs_chain);
 }
 
 static void mhi_netdev_create_debugfs_dir(void)
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index 162d8a6..9fff109 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -893,6 +893,8 @@ static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev)
 	if (!subsys)
 		return -EINVAL;
 
+	mutex_lock(&subsys->cntrl_mutex);
+
 	MHI_SUBSYS_LOG("Received RPMSG probe\n");
 
 	dev_set_drvdata(&rpdev->dev, subsys);
@@ -905,6 +907,8 @@ static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev)
 		schedule_work(&sat_cntrl->connect_work);
 	spin_unlock_irq(&subsys->cntrl_lock);
 
+	mutex_unlock(&subsys->cntrl_mutex);
+
 	return 0;
 }
 
@@ -978,6 +982,13 @@ static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
 		return;
 	}
 
+	/*
+	 * cancel any pending work as it is possible that work gets queued
+	 * when rpmsg probe comes in before controller is removed
+	 */
+	cancel_work_sync(&sat_cntrl->connect_work);
+	cancel_work_sync(&sat_cntrl->process_work);
+
 	/* remove address mappings */
 	mutex_lock(&sat_cntrl->list_mutex);
 	list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, node) {
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
index 978c627..e31eaa4 100644
--- a/drivers/bus/mhi/devices/mhi_uci.c
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -12,6 +12,7 @@
 #include <linux/of_device.h>
 #include <linux/poll.h>
 #include <linux/slab.h>
+#include <linux/termios.h>
 #include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/uaccess.h>
@@ -46,6 +47,7 @@ struct uci_dev {
 	size_t mtu;
 	int ref_count;
 	bool enabled;
+	u32 tiocm;
 	void *ipc_log;
 };
 
@@ -145,11 +147,20 @@ static long mhi_uci_ioctl(struct file *file,
 {
 	struct uci_dev *uci_dev = file->private_data;
 	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
 	long ret = -ERESTARTSYS;
 
 	mutex_lock(&uci_dev->mutex);
-	if (uci_dev->enabled)
+
+	if (cmd == TIOCMGET) {
+		spin_lock_bh(&uci_chan->lock);
+		ret = uci_dev->tiocm;
+		uci_dev->tiocm = 0;
+		spin_unlock_bh(&uci_chan->lock);
+	} else if (uci_dev->enabled) {
 		ret = mhi_ioctl(mhi_dev, cmd, arg);
+	}
+
 	mutex_unlock(&uci_dev->mutex);
 
 	return ret;
@@ -212,9 +223,16 @@ static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
 	spin_lock_bh(&uci_chan->lock);
 	if (!uci_dev->enabled) {
 		mask = POLLERR;
-	} else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
-		MSG_VERB("Client can read from node\n");
-		mask |= POLLIN | POLLRDNORM;
+	} else {
+		if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
+			MSG_VERB("Client can read from node\n");
+			mask |= POLLIN | POLLRDNORM;
+		}
+
+		if (uci_dev->tiocm) {
+			MSG_VERB("Line status changed\n");
+			mask |= POLLPRI;
+		}
 	}
 	spin_unlock_bh(&uci_chan->lock);
 
@@ -646,6 +664,20 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
 	wake_up(&uci_chan->wq);
 }
 
+static void mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB reason)
+{
+	struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
+	unsigned long flags;
+
+	if (reason == MHI_CB_DTR_SIGNAL) {
+		spin_lock_irqsave(&uci_chan->lock, flags);
+		uci_dev->tiocm = mhi_dev->tiocm;
+		spin_unlock_irqrestore(&uci_chan->lock, flags);
+		wake_up(&uci_chan->wq);
+	}
+}
+
 /* .driver_data stores max mtu */
 static const struct mhi_device_id mhi_uci_match_table[] = {
 	{ .chan = "LOOPBACK", .driver_data = 0x1000 },
@@ -664,6 +696,7 @@ static struct mhi_driver mhi_uci_driver = {
 	.probe = mhi_uci_probe,
 	.ul_xfer_cb = mhi_ul_xfer_cb,
 	.dl_xfer_cb = mhi_dl_xfer_cb,
+	.status_cb = mhi_status_cb,
 	.driver = {
 		.name = MHI_UCI_DRIVER_NAME,
 		.owner = THIS_MODULE,
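
A standalone sketch (simplified single-threaded model, not the driver code; function names are my own) of the latch-and-clear reporting the UCI hunks add: the status callback stores the latest line state and wakes pollers, poll() flags the pending event (POLLPRI in the driver), and TIOCMGET consumes it.

/* tiocm_latch.c — compile with: gcc tiocm_latch.c */
#include <stdio.h>

static unsigned int tiocm_pending;	/* written by the status callback */

static void status_cb(unsigned int tiocm)
{
	tiocm_pending = tiocm;	/* latch; the driver also does wake_up() */
}

static int poll_pri(void)
{
	return tiocm_pending != 0;	/* POLLPRI analogue */
}

static unsigned int tiocmget(void)
{
	unsigned int v = tiocm_pending;

	tiocm_pending = 0;	/* reading consumes the event */
	return v;
}

int main(void)
{
	int before, after;
	unsigned int v;

	status_cb(0x2);		/* a DTR line change arrives */
	before = poll_pri();
	v = tiocmget();
	after = poll_pri();
	printf("before=%d tiocm=0x%x after=%d\n", before, v, after);
	return 0;
}
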
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 9761de7..90bb9dd 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -48,6 +48,7 @@
 #define ADSP_MMAP_HEAP_ADDR 4
 #define ADSP_MMAP_REMOTE_HEAP_ADDR 8
 #define ADSP_MMAP_ADD_PAGES 0x1000
+#define ADSP_MMAP_ADD_PAGES_LLC  0x3000
 #define FASTRPC_DMAHANDLE_NOMAP (16)
 
 #define FASTRPC_ENOSUCH 39
@@ -98,7 +99,7 @@
 #define RH_CID ADSP_DOMAIN_ID
 
 #define PERF_KEYS \
-	"count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke:tid:ptr"
+	"count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke"
 #define FASTRPC_STATIC_HANDLE_PROCESS_GROUP (1)
 #define FASTRPC_STATIC_HANDLE_DSP_UTILITIES (2)
 #define FASTRPC_STATIC_HANDLE_LISTENER (3)
@@ -1439,7 +1440,7 @@ static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
 
 static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 {
-	remote_arg64_t *rpra, *lrpra;
+	remote_arg64_t *rpra;
 	remote_arg_t *lpra = ctx->lpra;
 	struct smq_invoke_buf *list;
 	struct smq_phy_page *pages, *ipage;
@@ -1454,7 +1455,11 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	int mflags = 0;
 	uint64_t *fdlist;
 	uint32_t *crclist;
-	int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
+	uint32_t earlyHint;
+	int64_t *perf_counter = NULL;
+
+	if (ctx->fl->profile)
+		perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
 
 	/* calculate size of the metadata */
 	rpra = NULL;
@@ -1493,8 +1498,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		ipage += 1;
 	}
 	mutex_unlock(&ctx->fl->map_mutex);
+
+	/* metalen includes metadata, fds, crc and early wakeup hint */
 	metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
-				 (sizeof(uint32_t) * M_CRCLIST);
+			(sizeof(uint32_t) * M_CRCLIST) + sizeof(earlyHint);
 
 	/* allocate new local rpra buffer */
 	lrpralen = (size_t)&list[0];
@@ -1503,11 +1510,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		if (err)
 			goto bail;
 	}
-	if (ctx->lbuf->virt)
-		memset(ctx->lbuf->virt, 0, lrpralen);
-
-	lrpra = ctx->lbuf->virt;
-	ctx->lrpra = lrpra;
+	ctx->lrpra = ctx->lbuf->virt;
 
 	/* calculate len required for copying */
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
@@ -1557,13 +1560,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 
 	/* map ion buffers */
 	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
-	for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
+	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
 		struct fastrpc_mmap *map = ctx->maps[i];
 		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
 		size_t len = lpra[i].buf.len;
 
-		rpra[i].buf.pv = lrpra[i].buf.pv = 0;
-		rpra[i].buf.len = lrpra[i].buf.len = len;
+		rpra[i].buf.pv = 0;
+		rpra[i].buf.len = len;
 		if (!len)
 			continue;
 		if (map) {
@@ -1591,7 +1594,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 			pages[idx].addr = map->phys + offset;
 			pages[idx].size = num << PAGE_SHIFT;
 		}
-		rpra[i].buf.pv = lrpra[i].buf.pv = buf;
+		rpra[i].buf.pv = buf;
 	}
 	PERF_END);
 	for (i = bufs; i < bufs + handles; ++i) {
@@ -1601,15 +1604,16 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		pages[i].size = map->size;
 	}
 	fdlist = (uint64_t *)&pages[bufs + handles];
-	for (i = 0; i < M_FDLIST; i++)
-		fdlist[i] = 0;
 	crclist = (uint32_t *)&fdlist[M_FDLIST];
-	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
+	/*
+	 * Reset fds, crc and early wakeup hint memory; the remote process
+	 * updates these values before responding.
+	 */
+	memset(fdlist, 0, sizeof(uint64_t)*M_FDLIST +
+			sizeof(uint32_t)*M_CRCLIST + sizeof(earlyHint));
 
 	/* copy non ion buffers */
 	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
 	rlen = copylen - metalen;
-	for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
+	for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
 		struct fastrpc_mmap *map = ctx->maps[i];
 		size_t mlen;
@@ -1628,7 +1632,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		VERIFY(err, rlen >= mlen);
 		if (err)
 			goto bail;
-		rpra[i].buf.pv = lrpra[i].buf.pv =
+		rpra[i].buf.pv =
 			 (args - ctx->overps[oix]->offset);
 		pages[list[i].pgidx].addr = ctx->buf->phys -
 					    ctx->overps[oix]->offset +
@@ -1661,7 +1665,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
 
-		if (rpra && lrpra && rpra[i].buf.len &&
+		if (rpra && rpra[i].buf.len &&
 			ctx->overps[oix]->mstart) {
 			if (map && map->buf) {
 				dma_buf_begin_cpu_access(map->buf,
@@ -1675,13 +1679,15 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		}
 	}
 	PERF_END);
-	for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
-		rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
-		rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
-		rpra[i].dma.offset = lrpra[i].dma.offset =
-			 (uint32_t)(uintptr_t)lpra[i].buf.pv;
+	for (i = bufs; rpra && i < bufs + handles; i++) {
+		rpra[i].dma.fd = ctx->fds[i];
+		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
+		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
 	}
 
+	/* Copy rpra to local buffer */
+	if (ctx->lrpra && rpra && lrpralen > 0)
+		memcpy(ctx->lrpra, rpra, lrpralen);
  bail:
 	return err;
 }
@@ -2784,7 +2790,8 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
 
 	spin_lock(&fl->hlock);
 	hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
-		if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
+		if (rbuf->raddr && ((rbuf->flags == ADSP_MMAP_ADD_PAGES) ||
+				    (rbuf->flags == ADSP_MMAP_ADD_PAGES_LLC))) {
 			if ((rbuf->raddr == ud->vaddrout) &&
 				(rbuf->size == ud->size)) {
 				free = rbuf;
@@ -2876,7 +2883,8 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
 		goto bail;
 	}
 	mutex_lock(&fl->internal_map_mutex);
-	if (ud->flags == ADSP_MMAP_ADD_PAGES) {
+	if ((ud->flags == ADSP_MMAP_ADD_PAGES) ||
+	    (ud->flags == ADSP_MMAP_ADD_PAGES_LLC)) {
 		if (ud->vaddrin) {
 			err = -EINVAL;
 			pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
@@ -2887,6 +2895,8 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
 					DMA_ATTR_DELAYED_UNMAP |
 					DMA_ATTR_NO_KERNEL_MAPPING |
 					DMA_ATTR_FORCE_NON_COHERENT;
+		if (ud->flags == ADSP_MMAP_ADD_PAGES_LLC)
+			dma_attr |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
 		err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
 								1, &rbuf);
 		if (err)
@@ -3683,7 +3693,7 @@ static int fastrpc_getperf(struct fastrpc_ioctl_perf *ioctl_perf,
 				param, sizeof(*ioctl_perf));
 	if (err)
 		goto bail;
-	ioctl_perf->numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
+	ioctl_perf->numkeys = PERF_KEY_MAX;
 	if (ioctl_perf->keys) {
 		char *keys = PERF_KEYS;
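
A standalone sketch (struct and buffer names are stand-ins, not the fastrpc types) of the restructuring in get_args() above: rather than writing every field twice, once to the shared buffer and once to a local mirror, the shared buffer is filled in a single pass and mirrored with one memcpy at the end.

/* mirror_copy.c — compile with: gcc mirror_copy.c */
#include <stdio.h>
#include <string.h>

struct arg {
	unsigned long pv;
	unsigned long len;
};

int main(void)
{
	struct arg rpra[4];	/* stand-in for the shared buffer */
	struct arg lrpra[4];	/* stand-in for the local copy */
	int i;

	for (i = 0; i < 4; i++) {	/* fill the shared buffer once */
		rpra[i].pv = 0x1000 + i;
		rpra[i].len = 64;
	}

	memcpy(lrpra, rpra, sizeof(rpra));	/* mirror in one pass */

	printf("lrpra[3].pv = 0x%lx\n", lrpra[3].pv);
	return 0;
}
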
 
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index c45de725..6d84722 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -780,7 +780,7 @@ struct diagchar_dev {
 	int dci_tag;
 	int dci_client_id[MAX_DCI_CLIENTS];
 	struct mutex dci_mutex;
-	spinlock_t rpmsginfo_lock[NUM_PERIPHERALS];
+	struct mutex rpmsginfo_mutex[NUM_PERIPHERALS];
 	int num_dci_client;
 	unsigned char *apps_dci_buf;
 	int dci_state;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index e3e6d75..9d5417d 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -4360,7 +4360,7 @@ static int __init diagchar_init(void)
 	mutex_init(&driver->hdlc_recovery_mutex);
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
 		mutex_init(&driver->diagfwd_channel_mutex[i]);
-		spin_lock_init(&driver->rpmsginfo_lock[i]);
+		mutex_init(&driver->rpmsginfo_mutex[i]);
 		driver->diag_id_sent[i] = 0;
 	}
 	init_waitqueue_head(&driver->wait_q);
diff --git a/drivers/char/diag/diagfwd_rpmsg.c b/drivers/char/diag/diagfwd_rpmsg.c
index 6dda72a..c1262c1 100644
--- a/drivers/char/diag/diagfwd_rpmsg.c
+++ b/drivers/char/diag/diagfwd_rpmsg.c
@@ -391,17 +391,12 @@ static void diag_state_open_rpmsg(void *ctxt)
 static void diag_rpmsg_queue_read(void *ctxt)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
-	unsigned long flags;
 
 	if (!ctxt)
 		return;
 
 	rpmsg_info = (struct diag_rpmsg_info *)ctxt;
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
-	if (rpmsg_info->hdl && rpmsg_info->wq &&
-		atomic_read(&rpmsg_info->opened))
-		queue_work(rpmsg_info->wq, &(rpmsg_info->read_work));
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	queue_work(rpmsg_info->wq, &(rpmsg_info->read_work));
 }
 
 static void diag_state_close_rpmsg(void *ctxt)
@@ -435,7 +430,6 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
 	struct diag_rpmsg_info *rpmsg_info =  NULL;
 	struct diagfwd_info *fwd_info = NULL;
 	int ret_val = 0;
-	unsigned long flags;
 
 	if (!ctxt || !buf || buf_len <= 0)
 		return -EIO;
@@ -446,16 +440,15 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
 		return -EIO;
 	}
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!atomic_read(&rpmsg_info->opened) ||
 		!rpmsg_info->hdl || !rpmsg_info->inited) {
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 			"diag:RPMSG channel not opened");
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return -EIO;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	fwd_info = rpmsg_info->fwd_ctxt;
 
@@ -479,25 +472,22 @@ static void diag_rpmsg_read_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							read_work);
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	if (!atomic_read(&rpmsg_info->opened)) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return;
 	}
 	if (!rpmsg_info->inited) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		diag_ws_release();
 		return;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	diagfwd_channel_read(rpmsg_info->fwd_ctxt);
 }
@@ -507,7 +497,6 @@ static int  diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
 	struct diag_rpmsg_info *rpmsg_info = NULL;
 	int err = 0;
 	struct rpmsg_device *rpdev = NULL;
-	unsigned long flags;
 
 	if (!ctxt || !buf)
 		return -EIO;
@@ -519,16 +508,14 @@ static int  diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!rpmsg_info->inited || !rpmsg_info->hdl ||
 		!atomic_read(&rpmsg_info->opened)) {
 		pr_err_ratelimited("diag: In %s, rpmsg not inited, rpmsg_info: %pK, buf: %pK, len: %d\n",
 				 __func__, rpmsg_info, buf, len);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return -ENODEV;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 
 	rpdev = (struct rpmsg_device *)rpmsg_info->hdl;
 	err = rpmsg_send(rpdev->ept, buf, len);
@@ -538,6 +525,7 @@ static int  diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
 	} else
 		err = -ENOMEM;
 
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	return err;
 
 }
@@ -547,18 +535,16 @@ static void diag_rpmsg_late_init_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							late_init_work);
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!rpmsg_info->hdl) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	diagfwd_channel_open(rpmsg_info->fwd_ctxt);
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "rpmsg late init p: %d t: %d\n",
@@ -571,18 +557,16 @@ static void diag_rpmsg_open_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							open_work);
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!rpmsg_info->inited) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	if (rpmsg_info->type != TYPE_CNTL) {
 		diagfwd_channel_open(rpmsg_info->fwd_ctxt);
@@ -597,19 +581,17 @@ static void diag_rpmsg_close_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							close_work);
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!rpmsg_info->inited || !rpmsg_info->hdl) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return;
 	}
 	rpmsg_info->hdl = NULL;
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	diagfwd_channel_close(rpmsg_info->fwd_ctxt);
 }
 
@@ -722,20 +704,18 @@ static void rpmsg_late_init(struct diag_rpmsg_info *rpmsg_info)
 
 int diag_rpmsg_init_peripheral(uint8_t peripheral)
 {
-	unsigned long flags;
-
 	if (peripheral >= NUM_PERIPHERALS) {
 		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
 			peripheral);
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 	rpmsg_late_init(&rpmsg_data[peripheral]);
 	rpmsg_late_init(&rpmsg_dci[peripheral]);
 	rpmsg_late_init(&rpmsg_cmd[peripheral]);
 	rpmsg_late_init(&rpmsg_dci_cmd[peripheral]);
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 
 	return 0;
 }
@@ -743,7 +723,6 @@ int diag_rpmsg_init_peripheral(uint8_t peripheral)
 static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 {
 	char wq_name[DIAG_RPMSG_NAME_SZ + 12];
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
@@ -763,7 +742,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 	INIT_WORK(&(rpmsg_info->close_work), diag_rpmsg_close_work_fn);
 	INIT_WORK(&(rpmsg_info->read_work), diag_rpmsg_read_work_fn);
 	INIT_WORK(&(rpmsg_info->late_init_work), diag_rpmsg_late_init_work_fn);
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	rpmsg_info->hdl = NULL;
 	rpmsg_info->fwd_ctxt = NULL;
 	atomic_set(&rpmsg_info->opened, 0);
@@ -772,7 +751,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 		"%s initialized fwd_ctxt: %pK hdl: %pK\n",
 		rpmsg_info->name, rpmsg_info->fwd_ctxt,
 		rpmsg_info->hdl);
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 }
 
 void diag_rpmsg_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
@@ -790,7 +769,6 @@ int diag_rpmsg_init(void)
 {
 	uint8_t peripheral;
 	struct diag_rpmsg_info *rpmsg_info = NULL;
-	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
 		if (peripheral != PERIPHERAL_WDSP)
@@ -800,10 +778,9 @@ int diag_rpmsg_init(void)
 		diagfwd_cntl_register(TRANSPORT_RPMSG, rpmsg_info->peripheral,
 					(void *)rpmsg_info, &rpmsg_ops,
 					&(rpmsg_info->fwd_ctxt));
-		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 		rpmsg_info->inited = 1;
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 		diagfwd_channel_open(rpmsg_info->fwd_ctxt);
 		diagfwd_late_open(rpmsg_info->fwd_ctxt);
 		__diag_rpmsg_init(&rpmsg_data[peripheral]);
@@ -836,31 +813,27 @@ static void __diag_rpmsg_exit(struct diag_rpmsg_info *rpmsg_info)
 void diag_rpmsg_early_exit(void)
 {
 	int peripheral = 0;
-	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
 		if (peripheral != PERIPHERAL_WDSP)
 			continue;
-		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_cntl[peripheral]);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 	}
 }
 
 void diag_rpmsg_exit(void)
 {
 	int peripheral = 0;
-	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
-		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_data[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_cmd[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_dci[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_dci_cmd[peripheral]);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 	}
 }
 
@@ -886,7 +859,6 @@ static struct diag_rpmsg_info *diag_get_rpmsg_ptr(char *name)
 static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
-	unsigned long flags;
 
 	if (!rpdev)
 		return 0;
@@ -896,11 +868,10 @@ static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 	rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
 	if (rpmsg_info) {
 
-		spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		rpmsg_info->hdl = rpdev;
 		atomic_set(&rpmsg_info->opened, 1);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 		dev_set_drvdata(&rpdev->dev, rpmsg_info);
 		diagfwd_channel_read(rpmsg_info->fwd_ctxt);
@@ -913,17 +884,15 @@ static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 static void diag_rpmsg_remove(struct rpmsg_device *rpdev)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
-	unsigned long flags;
 
 	if (!rpdev)
 		return;
 
 	rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
 	if (rpmsg_info) {
-		spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		atomic_set(&rpmsg_info->opened, 0);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		queue_work(rpmsg_info->wq, &rpmsg_info->close_work);
 	}
 }
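
The spinlock-to-mutex conversion above matters because diag_rpmsg_write() now keeps the lock held across rpmsg_send(), which may sleep; sleeping while holding a spinlock with interrupts disabled is illegal, while holding a mutex across a sleeping call is fine. A userspace sketch of that shape (pthreads as a stand-in for kernel locking, usleep() as a stand-in for the sleeping send):

/* locking_sketch.c — compile with: gcc -pthread locking_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;
static int channel_open;

static int send_locked(const char *buf)
{
	pthread_mutex_lock(&info_lock);
	if (!channel_open) {
		pthread_mutex_unlock(&info_lock);
		return -1;
	}
	usleep(1000);	/* stand-in for the sleeping rpmsg_send() */
	printf("sent: %s\n", buf);
	pthread_mutex_unlock(&info_lock);	/* held across the sleep:
						 * only legal for a mutex */
	return 0;
}

int main(void)
{
	channel_open = 1;
	return send_locked("hello") ? 1 : 0;
}
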
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 3348136..1131524 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
 			continue;
 
 		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+		if (div > GENERATED_MAX_DIV + 1)
+			div = GENERATED_MAX_DIV + 1;
 
 		clk_generated_best_diff(req, parent, parent_rate, div,
 					&best_diff, &best_rate);
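
The clamp above handles very low requested rates, where DIV_ROUND_CLOSEST() yields a divider larger than the hardware field can encode. A standalone sketch (MAX_DIV 255 is an assumed stand-in for the driver's GENERATED_MAX_DIV):

/* div_clamp.c — compile with: gcc div_clamp.c */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define MAX_DIV			255	/* assumed stand-in for GENERATED_MAX_DIV */

static unsigned long pick_div(unsigned long parent_rate, unsigned long want)
{
	unsigned long div = DIV_ROUND_CLOSEST(parent_rate, want);

	if (div > MAX_DIV + 1)	/* clamp to what the register field encodes */
		div = MAX_DIV + 1;
	return div;
}

int main(void)
{
	printf("%lu\n", pick_div(200000000UL, 1000000UL));	/* 200: in range */
	printf("%lu\n", pick_div(200000000UL, 1000UL));		/* clamped to 256 */
	return 0;
}
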
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 89fa68b..cd46b1d 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -416,3 +416,28 @@
 	  Support for the global clock controller on Bengal devices.
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  I2C, USB, UFS, SDCC, PCIe, Camera, Video etc.
+
+config SM_GPUCC_BENGAL
+	tristate "BENGAL Graphics Clock Controller"
+	select SM_GCC_BENGAL
+	help
+	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
+	  BENGAL devices.
+	  Say Y if you want to support graphics controller devices.
+
+config SM_DISPCC_BENGAL
+	tristate "BENGAL Display Clock Controller"
+	select SM_GCC_BENGAL
+	help
+	  Support for the display clock controller on Qualcomm Technologies, Inc.
+	  BENGAL devices.
+	  Say Y if you want to support display devices and functionality such as
+	  splash screen.
+
+config SM_DEBUGCC_BENGAL
+	tristate "BENGAL Debug Clock Controller"
+	select SM_GCC_BENGAL
+	help
+	  Support for the debug clock controller on Qualcomm Technologies, Inc.
+	  BENGAL devices.
+	  Say Y if you want to support the clock measurement functionality.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index d70dbc3..dbf60b9 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -53,11 +53,14 @@
 obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
 obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o
+obj-$(CONFIG_SM_DEBUGCC_BENGAL) += debugcc-bengal.o
 obj-$(CONFIG_SM_DEBUGCC_LITO) += debugcc-lito.o
+obj-$(CONFIG_SM_DISPCC_BENGAL) += dispcc-bengal.o
 obj-$(CONFIG_SM_DISPCC_LITO) += dispcc-lito.o
 obj-$(CONFIG_SM_GCC_BENGAL) += gcc-bengal.o
 obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o
+obj-$(CONFIG_SM_GPUCC_BENGAL) += gpucc-bengal.o
+obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
 obj-$(CONFIG_SM_NPUCC_LITO) += npucc-lito.o
 obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o
-obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index f15be8e..a208f54 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -377,9 +377,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
 };
 
 static const struct alpha_pll_config cam_cc_pll3_config = {
-	.l = 0xF,
+	.l = 0x24,
 	.cal_l = 0x44,
-	.alpha = 0xA000,
+	.alpha = 0x7555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -432,9 +432,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
 };
 
 static const struct alpha_pll_config cam_cc_pll4_config = {
-	.l = 0xF,
+	.l = 0x24,
 	.cal_l = 0x44,
-	.alpha = 0xA000,
+	.alpha = 0x7555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -876,16 +876,19 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
-	F(150000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(200000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(250000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(350000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(425000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(475000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(525000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(576000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(630000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(720000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src_kona_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(350000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(475000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(576000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(680000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	{ }
 };
@@ -943,16 +946,19 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
-	F(150000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(200000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(250000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(300000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(425000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(475000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(525000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(576000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(630000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(720000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src_kona_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(475000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(576000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(680000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	{ }
 };
@@ -2714,7 +2720,9 @@ static void cam_cc_kona_fixup_konav2(struct regmap *regmap)
 	cam_cc_bps_clk_src.freq_tbl = ftbl_cam_cc_bps_clk_src_kona_v2;
 	cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_kona_v2;
 	cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_kona_v2;
+	cam_cc_ife_0_clk_src.freq_tbl = ftbl_cam_cc_ife_0_clk_src_kona_v2;
 	cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 680000000;
+	cam_cc_ife_1_clk_src.freq_tbl = ftbl_cam_cc_ife_1_clk_src_kona_v2;
 	cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 680000000;
 	cam_cc_ife_lite_clk_src.freq_tbl = ftbl_cam_cc_ife_lite_clk_src_kona_v2;
 	cam_cc_jpeg_clk_src.freq_tbl = ftbl_cam_cc_bps_clk_src_kona_v2;
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
index 125e082..20c3295 100644
--- a/drivers/clk/qcom/camcc-lito.c
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -24,7 +24,7 @@
 #include "clk-rcg.h"
 #include "clk-regmap.h"
 #include "common.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
diff --git a/drivers/clk/qcom/debugcc-bengal.c b/drivers/clk/qcom/debugcc-bengal.c
new file mode 100644
index 0000000..38b57e8
--- /dev/null
+++ b/drivers/clk/qcom/debugcc-bengal.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-debug.h"
+#include "common.h"
+
+static struct measure_clk_data debug_mux_priv = {
+	.ctl_reg = 0x62038,
+	.status_reg = 0x6203C,
+	.xo_div4_cbcr = 0x28008,
+};
+
+static const char *const cpu_cc_debug_mux_parent_names[] = {
+	"perfcl_clk",
+	"pwrcl_clk",
+};
+
+static int cpu_cc_debug_mux_sels[] = {
+	0x25,		/* perfcl_clk */
+	0x21,		/* pwrcl_clk */
+};
+
+static int apss_cc_debug_mux_pre_divs[] = {
+	0x8,		/* perfcl_clk */
+	0x4,		/* pwrcl_clk */
+};
+
+static struct clk_debug_mux cpu_cc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x0,
+	.post_div_offset = 0x0,
+	.cbcr_offset = U32_MAX,
+	.src_sel_mask = 0x3FF,
+	.src_sel_shift = 8,
+	.post_div_mask = 0xF,
+	.post_div_shift = 28,
+	.post_div_val = 1,
+	.mux_sels = cpu_cc_debug_mux_sels,
+	.pre_div_vals = apss_cc_debug_mux_pre_divs,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu_cc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = cpu_cc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(cpu_cc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const char *const disp_cc_debug_mux_parent_names[] = {
+	"disp_cc_mdss_ahb_clk",
+	"disp_cc_mdss_byte0_clk",
+	"disp_cc_mdss_byte0_intf_clk",
+	"disp_cc_mdss_esc0_clk",
+	"disp_cc_mdss_mdp_clk",
+	"disp_cc_mdss_mdp_lut_clk",
+	"disp_cc_mdss_non_gdsc_ahb_clk",
+	"disp_cc_mdss_pclk0_clk",
+	"disp_cc_mdss_rot_clk",
+	"disp_cc_mdss_vsync_clk",
+	"disp_cc_sleep_clk",
+	"disp_cc_xo_clk",
+};
+
+static int disp_cc_debug_mux_sels[] = {
+	0x1A,		/* disp_cc_mdss_ahb_clk */
+	0x12,		/* disp_cc_mdss_byte0_clk */
+	0x13,		/* disp_cc_mdss_byte0_intf_clk */
+	0x14,		/* disp_cc_mdss_esc0_clk */
+	0xE,		/* disp_cc_mdss_mdp_clk */
+	0x10,		/* disp_cc_mdss_mdp_lut_clk */
+	0x1B,		/* disp_cc_mdss_non_gdsc_ahb_clk */
+	0xD,		/* disp_cc_mdss_pclk0_clk */
+	0xF,		/* disp_cc_mdss_rot_clk */
+	0x11,		/* disp_cc_mdss_vsync_clk */
+	0x24,		/* disp_cc_sleep_clk */
+	0x23,		/* disp_cc_xo_clk */
+};
+
+static struct clk_debug_mux disp_cc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x7000,
+	.post_div_offset = 0x5008,
+	.cbcr_offset = 0x500C,
+	.src_sel_mask = 0xFF,
+	.src_sel_shift = 0,
+	.post_div_mask = 0x3,
+	.post_div_shift = 0,
+	.post_div_val = 4,
+	.mux_sels = disp_cc_debug_mux_sels,
+	.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = disp_cc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(disp_cc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const char *const gcc_debug_mux_parent_names[] = {
+	"cpu_cc_debug_mux",
+	"disp_cc_debug_mux",
+	"gcc_ahb2phy_csi_clk",
+	"gcc_ahb2phy_usb_clk",
+	"gcc_apc_vs_clk",
+	"gcc_bimc_gpu_axi_clk",
+	"gcc_boot_rom_ahb_clk",
+	"gcc_cam_throttle_nrt_clk",
+	"gcc_cam_throttle_rt_clk",
+	"gcc_camera_ahb_clk",
+	"gcc_camera_xo_clk",
+	"gcc_camss_axi_clk",
+	"gcc_camss_camnoc_atb_clk",
+	"gcc_camss_camnoc_nts_xo_clk",
+	"gcc_camss_cci_0_clk",
+	"gcc_camss_cphy_0_clk",
+	"gcc_camss_cphy_1_clk",
+	"gcc_camss_cphy_2_clk",
+	"gcc_camss_csi0phytimer_clk",
+	"gcc_camss_csi1phytimer_clk",
+	"gcc_camss_csi2phytimer_clk",
+	"gcc_camss_mclk0_clk",
+	"gcc_camss_mclk1_clk",
+	"gcc_camss_mclk2_clk",
+	"gcc_camss_mclk3_clk",
+	"gcc_camss_nrt_axi_clk",
+	"gcc_camss_ope_ahb_clk",
+	"gcc_camss_ope_clk",
+	"gcc_camss_rt_axi_clk",
+	"gcc_camss_tfe_0_clk",
+	"gcc_camss_tfe_0_cphy_rx_clk",
+	"gcc_camss_tfe_0_csid_clk",
+	"gcc_camss_tfe_1_clk",
+	"gcc_camss_tfe_1_cphy_rx_clk",
+	"gcc_camss_tfe_1_csid_clk",
+	"gcc_camss_tfe_2_clk",
+	"gcc_camss_tfe_2_cphy_rx_clk",
+	"gcc_camss_tfe_2_csid_clk",
+	"gcc_camss_top_ahb_clk",
+	"gcc_cfg_noc_usb3_prim_axi_clk",
+	"gcc_cpuss_ahb_clk",
+	"gcc_cpuss_gnoc_clk",
+	"gcc_cpuss_throttle_core_clk",
+	"gcc_cpuss_throttle_xo_clk",
+	"gcc_disp_ahb_clk",
+	"gcc_disp_gpll0_div_clk_src",
+	"gcc_disp_hf_axi_clk",
+	"gcc_disp_throttle_core_clk",
+	"gcc_disp_xo_clk",
+	"gcc_gp1_clk",
+	"gcc_gp2_clk",
+	"gcc_gp3_clk",
+	"gcc_gpu_cfg_ahb_clk",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
+	"gcc_gpu_memnoc_gfx_clk",
+	"gcc_gpu_snoc_dvm_gfx_clk",
+	"gcc_gpu_throttle_core_clk",
+	"gcc_gpu_throttle_xo_clk",
+	"gcc_mss_vs_clk",
+	"gcc_pdm2_clk",
+	"gcc_pdm_ahb_clk",
+	"gcc_pdm_xo4_clk",
+	"gcc_prng_ahb_clk",
+	"gcc_qmip_camera_nrt_ahb_clk",
+	"gcc_qmip_camera_rt_ahb_clk",
+	"gcc_qmip_cpuss_cfg_ahb_clk",
+	"gcc_qmip_disp_ahb_clk",
+	"gcc_qmip_gpu_cfg_ahb_clk",
+	"gcc_qmip_video_vcodec_ahb_clk",
+	"gcc_qupv3_wrap0_core_2x_clk",
+	"gcc_qupv3_wrap0_core_clk",
+	"gcc_qupv3_wrap_0_m_ahb_clk",
+	"gcc_qupv3_wrap_0_s_ahb_clk",
+	"gcc_sdcc1_ahb_clk",
+	"gcc_sdcc1_apps_clk",
+	"gcc_sdcc1_ice_core_clk",
+	"gcc_sdcc2_ahb_clk",
+	"gcc_sdcc2_apps_clk",
+	"gcc_sys_noc_cpuss_ahb_clk",
+	"gcc_sys_noc_ufs_phy_axi_clk",
+	"gcc_sys_noc_usb3_prim_axi_clk",
+	"gcc_ufs_phy_ahb_clk",
+	"gcc_ufs_phy_axi_clk",
+	"gcc_ufs_phy_ice_core_clk",
+	"gcc_ufs_phy_phy_aux_clk",
+	"gcc_ufs_phy_rx_symbol_0_clk",
+	"gcc_ufs_phy_tx_symbol_0_clk",
+	"gcc_ufs_phy_unipro_core_clk",
+	"gcc_usb30_prim_master_clk",
+	"gcc_usb30_prim_mock_utmi_clk",
+	"gcc_usb30_prim_sleep_clk",
+	"gcc_usb3_prim_phy_com_aux_clk",
+	"gcc_usb3_prim_phy_pipe_clk",
+	"gcc_vcodec0_axi_clk",
+	"gcc_vdda_vs_clk",
+	"gcc_vddcx_vs_clk",
+	"gcc_vddmx_vs_clk",
+	"gcc_venus_ahb_clk",
+	"gcc_venus_ctl_axi_clk",
+	"gcc_video_ahb_clk",
+	"gcc_video_axi0_clk",
+	"gcc_video_throttle_core_clk",
+	"gcc_video_vcodec0_sys_clk",
+	"gcc_video_venus_ctl_clk",
+	"gcc_video_xo_clk",
+	"gcc_vs_ctrl_ahb_clk",
+	"gcc_vs_ctrl_clk",
+	"gcc_wcss_vs_clk",
+	"gpu_cc_debug_mux",
+	"mc_cc_debug_mux",
+};
+
+static int gcc_debug_mux_sels[] = {
+	0xAF,		/* cpu_cc_debug_mux */
+	0x42,		/* disp_cc_debug_mux */
+	0x63,		/* gcc_ahb2phy_csi_clk */
+	0x64,		/* gcc_ahb2phy_usb_clk */
+	0xC3,		/* gcc_apc_vs_clk */
+	0x90,		/* gcc_bimc_gpu_axi_clk */
+	0x76,		/* gcc_boot_rom_ahb_clk */
+	0x4C,		/* gcc_cam_throttle_nrt_clk */
+	0x4B,		/* gcc_cam_throttle_rt_clk */
+	0x37,		/* gcc_camera_ahb_clk */
+	0x3F,		/* gcc_camera_xo_clk */
+	0x136,		/* gcc_camss_axi_clk */
+	0x138,		/* gcc_camss_camnoc_atb_clk */
+	0x139,		/* gcc_camss_camnoc_nts_xo_clk */
+	0x134,		/* gcc_camss_cci_0_clk */
+	0x128,		/* gcc_camss_cphy_0_clk */
+	0x129,		/* gcc_camss_cphy_1_clk */
+	0x12A,		/* gcc_camss_cphy_2_clk */
+	0x11A,		/* gcc_camss_csi0phytimer_clk */
+	0x11B,		/* gcc_camss_csi1phytimer_clk */
+	0x11C,		/* gcc_camss_csi2phytimer_clk */
+	0x11D,		/* gcc_camss_mclk0_clk */
+	0x11E,		/* gcc_camss_mclk1_clk */
+	0x11F,		/* gcc_camss_mclk2_clk */
+	0x120,		/* gcc_camss_mclk3_clk */
+	0x13A,		/* gcc_camss_nrt_axi_clk */
+	0x133,		/* gcc_camss_ope_ahb_clk */
+	0x131,		/* gcc_camss_ope_clk */
+	0x13C,		/* gcc_camss_rt_axi_clk */
+	0x121,		/* gcc_camss_tfe_0_clk */
+	0x125,		/* gcc_camss_tfe_0_cphy_rx_clk */
+	0x12B,		/* gcc_camss_tfe_0_csid_clk */
+	0x122,		/* gcc_camss_tfe_1_clk */
+	0x126,		/* gcc_camss_tfe_1_cphy_rx_clk */
+	0x12D,		/* gcc_camss_tfe_1_csid_clk */
+	0x123,		/* gcc_camss_tfe_2_clk */
+	0x127,		/* gcc_camss_tfe_2_cphy_rx_clk */
+	0x12F,		/* gcc_camss_tfe_2_csid_clk */
+	0x135,		/* gcc_camss_top_ahb_clk */
+	0x1D,		/* gcc_cfg_noc_usb3_prim_axi_clk */
+	0xA9,		/* gcc_cpuss_ahb_clk */
+	0xAA,		/* gcc_cpuss_gnoc_clk */
+	0xB2,		/* gcc_cpuss_throttle_core_clk */
+	0xB1,		/* gcc_cpuss_throttle_xo_clk */
+	0x38,		/* gcc_disp_ahb_clk */
+	0x47,		/* gcc_disp_gpll0_div_clk_src */
+	0x3D,		/* gcc_disp_hf_axi_clk */
+	0x49,		/* gcc_disp_throttle_core_clk */
+	0x40,		/* gcc_disp_xo_clk */
+	0xBA,		/* gcc_gp1_clk */
+	0xBB,		/* gcc_gp2_clk */
+	0xBC,		/* gcc_gp3_clk */
+	0xE5,		/* gcc_gpu_cfg_ahb_clk */
+	0xEB,		/* gcc_gpu_gpll0_clk_src */
+	0xEC,		/* gcc_gpu_gpll0_div_clk_src */
+	0xE8,		/* gcc_gpu_memnoc_gfx_clk */
+	0xEA,		/* gcc_gpu_snoc_dvm_gfx_clk */
+	0xEF,		/* gcc_gpu_throttle_core_clk */
+	0xEE,		/* gcc_gpu_throttle_xo_clk */
+	0xC2,		/* gcc_mss_vs_clk */
+	0x73,		/* gcc_pdm2_clk */
+	0x71,		/* gcc_pdm_ahb_clk */
+	0x72,		/* gcc_pdm_xo4_clk */
+	0x74,		/* gcc_prng_ahb_clk */
+	0x3A,		/* gcc_qmip_camera_nrt_ahb_clk */
+	0x48,		/* gcc_qmip_camera_rt_ahb_clk */
+	0xB0,		/* gcc_qmip_cpuss_cfg_ahb_clk */
+	0x3B,		/* gcc_qmip_disp_ahb_clk */
+	0xED,		/* gcc_qmip_gpu_cfg_ahb_clk */
+	0x39,		/* gcc_qmip_video_vcodec_ahb_clk */
+	0x6A,		/* gcc_qupv3_wrap0_core_2x_clk */
+	0x69,		/* gcc_qupv3_wrap0_core_clk */
+	0x67,		/* gcc_qupv3_wrap_0_m_ahb_clk */
+	0x68,		/* gcc_qupv3_wrap_0_s_ahb_clk */
+	0xF3,		/* gcc_sdcc1_ahb_clk */
+	0xF2,		/* gcc_sdcc1_apps_clk */
+	0xF4,		/* gcc_sdcc1_ice_core_clk */
+	0x66,		/* gcc_sdcc2_ahb_clk */
+	0x65,		/* gcc_sdcc2_apps_clk */
+	0x9,		/* gcc_sys_noc_cpuss_ahb_clk */
+	0x19,		/* gcc_sys_noc_ufs_phy_axi_clk */
+	0x18,		/* gcc_sys_noc_usb3_prim_axi_clk */
+	0x111,		/* gcc_ufs_phy_ahb_clk */
+	0x110,		/* gcc_ufs_phy_axi_clk */
+	0x117,		/* gcc_ufs_phy_ice_core_clk */
+	0x118,		/* gcc_ufs_phy_phy_aux_clk */
+	0x113,		/* gcc_ufs_phy_rx_symbol_0_clk */
+	0x112,		/* gcc_ufs_phy_tx_symbol_0_clk */
+	0x116,		/* gcc_ufs_phy_unipro_core_clk */
+	0x5C,		/* gcc_usb30_prim_master_clk */
+	0x5E,		/* gcc_usb30_prim_mock_utmi_clk */
+	0x5D,		/* gcc_usb30_prim_sleep_clk */
+	0x5F,		/* gcc_usb3_prim_phy_com_aux_clk */
+	0x60,		/* gcc_usb3_prim_phy_pipe_clk */
+	0x142,		/* gcc_vcodec0_axi_clk */
+	0xBF,		/* gcc_vdda_vs_clk */
+	0xBD,		/* gcc_vddcx_vs_clk */
+	0xBE,		/* gcc_vddmx_vs_clk */
+	0x143,		/* gcc_venus_ahb_clk */
+	0x141,		/* gcc_venus_ctl_axi_clk */
+	0x36,		/* gcc_video_ahb_clk */
+	0x3C,		/* gcc_video_axi0_clk */
+	0x4A,		/* gcc_video_throttle_core_clk */
+	0x13F,		/* gcc_video_vcodec0_sys_clk */
+	0x13D,		/* gcc_video_venus_ctl_clk */
+	0x3E,		/* gcc_video_xo_clk */
+	0xC1,		/* gcc_vs_ctrl_ahb_clk */
+	0xC0,		/* gcc_vs_ctrl_clk */
+	0xC4,		/* gcc_wcss_vs_clk */
+	0xE7,		/* gpu_cc_debug_mux */
+	0x9E,		/* mc_cc_debug_mux */
+};
+
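+/*
+ * The GCC debug mux routes one of the clocks named above onto a single
+ * measurable output: src_sel_mask/src_sel_shift select the source (values
+ * from gcc_debug_mux_sels) and post_div_* describe the output divider.
+ */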
+static struct clk_debug_mux gcc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x62000,
+	.post_div_offset = 0x30000,
+	.cbcr_offset = 0x30004,
+	.src_sel_mask = 0x3FF,
+	.src_sel_shift = 0,
+	.post_div_mask = 0xF,
+	.post_div_shift = 0,
+	.post_div_val = 1,
+	.mux_sels = gcc_debug_mux_sels,
+	.hw.init = &(struct clk_init_data){
+		.name = "gcc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = gcc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(gcc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const char *const gpu_cc_debug_mux_parent_names[] = {
+	"gpu_cc_ahb_clk",
+	"gpu_cc_crc_ahb_clk",
+	"gpu_cc_cx_gfx3d_clk",
+	"gpu_cc_cx_gfx3d_slv_clk",
+	"gpu_cc_cx_gmu_clk",
+	"gpu_cc_cx_snoc_dvm_clk",
+	"gpu_cc_cxo_aon_clk",
+	"gpu_cc_cxo_clk",
+	"gpu_cc_gx_cxo_clk",
+	"gpu_cc_gx_gfx3d_clk",
+	"gpu_cc_sleep_clk",
+};
+
+static int gpu_cc_debug_mux_sels[] = {
+	0x10,		/* gpu_cc_ahb_clk */
+	0x11,		/* gpu_cc_crc_ahb_clk */
+	0x1A,		/* gpu_cc_cx_gfx3d_clk */
+	0x1B,		/* gpu_cc_cx_gfx3d_slv_clk */
+	0x18,		/* gpu_cc_cx_gmu_clk */
+	0x15,		/* gpu_cc_cx_snoc_dvm_clk */
+	0xA,		/* gpu_cc_cxo_aon_clk */
+	0x19,		/* gpu_cc_cxo_clk */
+	0xE,		/* gpu_cc_gx_cxo_clk */
+	0xB,		/* gpu_cc_gx_gfx3d_clk */
+	0x16,		/* gpu_cc_sleep_clk */
+};
+
+static struct clk_debug_mux gpu_cc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x1568,
+	.post_div_offset = 0x10FC,
+	.cbcr_offset = 0x1100,
+	.src_sel_mask = 0xFF,
+	.src_sel_shift = 0,
+	.post_div_mask = 0x3,
+	.post_div_shift = 0,
+	.post_div_val = 2,
+	.mux_sels = gpu_cc_debug_mux_sels,
+	.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = gpu_cc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(gpu_cc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const char *const mc_cc_debug_mux_parent_names[] = {
+	"measure_only_mccc_clk",
+};
+
+static struct clk_debug_mux mc_cc_debug_mux = {
+	.period_offset = 0x50,
+	.hw.init = &(struct clk_init_data){
+		.name = "mc_cc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = mc_cc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(mc_cc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
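+/* Pair each debug mux with the DT regmap name used to map its registers */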
+static struct mux_regmap_names mux_list[] = {
+	{ .mux = &cpu_cc_debug_mux, .regmap_name = "qcom,cpucc" },
+	{ .mux = &disp_cc_debug_mux, .regmap_name = "qcom,dispcc" },
+	{ .mux = &gcc_debug_mux, .regmap_name = "qcom,gcc" },
+	{ .mux = &gpu_cc_debug_mux, .regmap_name = "qcom,gpucc" },
+	{ .mux = &mc_cc_debug_mux, .regmap_name = "qcom,mccc" },
+};
+
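+/*
+ * Dummy clocks stand in for sources that can only be measured, not
+ * controlled; rrate is a placeholder rate reported to the clock framework.
+ */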
+static struct clk_dummy measure_only_mccc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_mccc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy perfcl_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "perfcl_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy pwrcl_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "pwrcl_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+struct clk_hw *debugcc_bengal_hws[] = {
+	&measure_only_mccc_clk.hw,
+	&perfcl_clk.hw,
+	&pwrcl_clk.hw,
+};
+
+static const struct of_device_id clk_debug_match_table[] = {
+	{ .compatible = "qcom,bengal-debugcc" },
+	{ }
+};
+
+static int clk_debug_bengal_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	int i, ret;
+
+	BUILD_BUG_ON(ARRAY_SIZE(disp_cc_debug_mux_parent_names) !=
+		     ARRAY_SIZE(disp_cc_debug_mux_sels));
+	BUILD_BUG_ON(ARRAY_SIZE(gcc_debug_mux_parent_names) !=
+		     ARRAY_SIZE(gcc_debug_mux_sels));
+	BUILD_BUG_ON(ARRAY_SIZE(gpu_cc_debug_mux_parent_names) !=
+		     ARRAY_SIZE(gpu_cc_debug_mux_sels));
+
+	clk = devm_clk_get(&pdev->dev, "xo_clk_src");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo clock\n");
+		return PTR_ERR(clk);
+	}
+
+	debug_mux_priv.cxo = clk;
+
+	for (i = 0; i < ARRAY_SIZE(mux_list); i++) {
+		ret = map_debug_bases(pdev, mux_list[i].regmap_name,
+				      mux_list[i].mux);
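+		/* Skip muxes whose register base is not described for this target */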
+		if (ret == -EBADR)
+			continue;
+		else if (ret)
+			return ret;
+
+		clk = devm_clk_register(&pdev->dev, &mux_list[i].mux->hw);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "Unable to register %s, err:(%ld)\n",
+				mux_list[i].mux->hw.init->name, PTR_ERR(clk));
+			return PTR_ERR(clk);
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(debugcc_bengal_hws); i++) {
+		clk = devm_clk_register(&pdev->dev, debugcc_bengal_hws[i]);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "Unable to register %s, err:(%ld)\n",
+				debugcc_bengal_hws[i]->init->name, PTR_ERR(clk));
+			return PTR_ERR(clk);
+		}
+	}
+
+	ret = clk_debug_measure_register(&gcc_debug_mux.hw);
+	if (ret)
+		dev_err(&pdev->dev, "Could not register Measure clock\n");
+
+	return ret;
+}
+
+static struct platform_driver clk_debug_driver = {
+	.probe = clk_debug_bengal_probe,
+	.driver = {
+		.name = "debugcc-bengal",
+		.of_match_table = clk_debug_match_table,
+	},
+};
+
+static int __init clk_debug_bengal_init(void)
+{
+	return platform_driver_register(&clk_debug_driver);
+}
+fs_initcall(clk_debug_bengal_init);
+
+MODULE_DESCRIPTION("QTI DEBUG CC BENGAL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-bengal.c b/drivers/clk/qcom/dispcc-bengal.c
new file mode 100644
index 0000000..4f48c64
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-bengal.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-bengal.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "vdd-level-bengal.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+	P_DISP_CC_PLL0_OUT_MAIN,
+	P_DSI0_PHY_PLL_OUT_BYTECLK,
+	P_DSI0_PHY_PLL_OUT_DSICLK,
+	P_DSI1_PHY_PLL_OUT_DSICLK,
+	P_GPLL0_OUT_MAIN,
+};
+
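+/*
+ * Each parent_map entry pairs a parent identifier with its mux select
+ * value; the matching *_parent_names arrays list the parents in the
+ * same order.
+ */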
+static const struct parent_map disp_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"dsi0_phy_pll_out_byteclk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DISP_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"disp_cc_pll0_out_main",
+	"gpll0_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_3[] = {
+	"bi_tcxo",
+	"gpll0_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+	{ P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_4[] = {
+	"bi_tcxo",
+	"dsi0_phy_pll_out_dsiclk",
+	"dsi1_phy_pll_out_dsiclk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+	{ P_CHIP_SLEEP_CLK, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_5[] = {
+	"chip_sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco spark_vco[] = {
+	{ 500000000, 1000000000, 2 },
+};
+
+/* 768MHz configuration: L = 0x28 (40) x 19.2MHz XO, zero alpha fraction */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+	.l = 0x28,
+	.alpha = 0x0,
+	.alpha_en_mask = BIT(24),
+	.vco_val = 0x2 << 20,
+	.vco_mask = GENMASK(21, 20),
+	.main_output_mask = BIT(0),
+	.config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = spark_vco,
+	.num_vco = ARRAY_SIZE(spark_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_disp_cc_pll0_out_main[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv disp_cc_pll0_out_main = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_disp_cc_pll0_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_disp_cc_pll0_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_pll0_out_main",
+		.parent_names = (const char *[]){ "disp_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+	.reg = 0x20d4,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "disp_cc_mdss_byte0_div_clk_src",
+		.parent_names =
+			(const char *[]){ "disp_cc_mdss_byte0_clk_src" },
+		.num_parents = 1,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+	F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+	.cmd_rcgr = 0x2154,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_3,
+	.freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_ahb_clk_src",
+		.parent_names = disp_cc_parent_names_3,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOW] = 37500000,
+			[VDD_NOMINAL] = 75000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+	.cmd_rcgr = 0x20bc,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_byte0_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_byte2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOWER] = 164000000,
+			[VDD_LOW] = 187500000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+	.cmd_rcgr = 0x20d8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_esc0_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+	F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	F(384000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+	.cmd_rcgr = 0x2074,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_mdp_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOWER] = 192000000,
+			[VDD_LOW] = 256000000,
+			[VDD_LOW_L1] = 307200000,
+			[VDD_NOMINAL] = 384000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+	.cmd_rcgr = 0x205c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_4,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_pclk0_clk_src",
+		.parent_names = disp_cc_parent_names_4,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_pixel_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOWER] = 183310056,
+			[VDD_LOW] = 250000000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+	F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+	.cmd_rcgr = 0x208c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_rot_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOWER] = 192000000,
+			[VDD_LOW] = 256000000,
+			[VDD_LOW_L1] = 307200000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+	.cmd_rcgr = 0x20a4,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_vsync_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+	F(32764, P_CHIP_SLEEP_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+	.cmd_rcgr = 0x6050,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_5,
+	.freq_tbl = ftbl_disp_cc_sleep_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_sleep_clk_src",
+		.parent_names = disp_cc_parent_names_5,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 32000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+	.cmd_rcgr = 0x6034,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_xo_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+	.halt_reg = 0x2044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_ahb_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+	.halt_reg = 0x2024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+	.halt_reg = 0x2028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_div_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+	.halt_reg = 0x202c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x202c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_esc0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_esc0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+	.halt_reg = 0x2008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_mdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+	.halt_reg = 0x2018,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x2018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_lut_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_mdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+	.halt_reg = 0x4004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x4004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_non_gdsc_ahb_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+	.halt_reg = 0x2004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_pclk0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_pclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+	.halt_reg = 0x2010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rot_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_rot_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+	.halt_reg = 0x2020,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_vsync_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_vsync_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+	.halt_reg = 0x6068,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6068,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_sleep_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_xo_clk = {
+	.halt_reg = 0x604c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x604c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_xo_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *disp_cc_bengal_clocks[] = {
+	[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+	[DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+	[DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+	[DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+	[DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+	[DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+	[DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+	[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+	[DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+	[DISP_CC_PLL0_OUT_MAIN] = &disp_cc_pll0_out_main.clkr,
+	[DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+	[DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+	[DISP_CC_XO_CLK] = &disp_cc_xo_clk.clkr,
+	[DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct regmap_config disp_cc_bengal_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x10000,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_bengal_desc = {
+	.config = &disp_cc_bengal_regmap_config,
+	.clks = disp_cc_bengal_clocks,
+	.num_clks = ARRAY_SIZE(disp_cc_bengal_clocks),
+};
+
+static const struct of_device_id dispcc_bengal_match_table[] = {
+	{ .compatible = "qcom,bengal-dispcc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, dispcc_bengal_match_table);
+
+static int dispcc_bengal_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct clk *clk;
+	int ret;
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	regmap = qcom_cc_map(pdev, &disp_cc_bengal_desc);
+	if (IS_ERR(regmap)) {
+		pr_err("Failed to map the disp_cc registers\n");
+		return PTR_ERR(regmap);
+	}
+
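+	/*
+	 * Probe-defer until the AHB interface clock is available; only
+	 * the handle is checked here, so it is released immediately.
+	 */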
+	clk = clk_get(&pdev->dev, "cfg_ahb_clk");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get ahb clock handle\n");
+		return PTR_ERR(clk);
+	}
+	clk_put(clk);
+
+	clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+	ret = qcom_cc_really_probe(pdev, &disp_cc_bengal_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register Display CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Display CC clocks\n");
+	return 0;
+}
+
+static struct platform_driver dispcc_bengal_driver = {
+	.probe = dispcc_bengal_probe,
+	.driver = {
+		.name = "bengal-dispcc",
+		.of_match_table = dispcc_bengal_match_table,
+	},
+};
+
+static int __init disp_cc_bengal_init(void)
+{
+	return platform_driver_register(&dispcc_bengal_driver);
+}
+subsys_initcall(disp_cc_bengal_init);
+
+static void __exit disp_cc_bengal_exit(void)
+{
+	platform_driver_unregister(&dispcc_bengal_driver);
+}
+module_exit(disp_cc_bengal_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC bengal Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-lito.c b/drivers/clk/qcom/dispcc-lito.c
index 35a7d34..0d4f4b7 100644
--- a/drivers/clk/qcom/dispcc-lito.c
+++ b/drivers/clk/qcom/dispcc-lito.c
@@ -25,7 +25,7 @@
 #include "common.h"
 #include "gdsc.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index f5ec81f..926a8da 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -26,7 +26,7 @@
 #include "clk-regmap.h"
 #include "common.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 #define GCC_NPU_MISC				0x4d110
 #define GCC_GPU_MISC				0x71028
diff --git a/drivers/clk/qcom/gpucc-bengal.c b/drivers/clk/qcom/gpucc-bengal.c
new file mode 100644
index 0000000..91f62b2
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-bengal.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-bengal.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "vdd-level-bengal.h"
+
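+/* Field positions of the SLEEP/WAKE delay counters in the CX GMU CBCR */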
+#define CX_GMU_CBCR_SLEEP_MASK          0xf
+#define CX_GMU_CBCR_SLEEP_SHIFT         4
+#define CX_GMU_CBCR_WAKE_MASK           0xf
+#define CX_GMU_CBCR_WAKE_SHIFT          8
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL0_OUT_MAIN_DIV,
+	P_GPU_CC_PLL0_2X_DIV_CLK_SRC,
+	P_GPU_CC_PLL0_OUT_AUX2,
+	P_GPU_CC_PLL0_OUT_MAIN,
+	P_GPU_CC_PLL1_OUT_AUX,
+	P_GPU_CC_PLL1_OUT_AUX2,
+	P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPU_CC_PLL1_OUT_MAIN, 3 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0_out_main",
+	"gpu_cc_pll1_out_main",
+	"gpll0_out_main",
+	"gpll0_out_main_div",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_2X_DIV_CLK_SRC, 1 },
+	{ P_GPU_CC_PLL0_OUT_AUX2, 2 },
+	{ P_GPU_CC_PLL1_OUT_AUX, 3 },
+	{ P_GPU_CC_PLL1_OUT_AUX2, 4 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0_out_aux",
+	"gpu_cc_pll0_out_aux2",
+	"gpu_cc_pll1_out_aux",
+	"gpu_cc_pll1_out_aux2",
+	"gpll0_out_main",
+	"core_bi_pll_test_se",
+};
+
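+/* VCO frequency ranges: { min_rate, max_rate, vco_sel } */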
+static struct pll_vco default_vco[] = {
+	{ 1000000000, 2000000000, 0 },
+	{ 750000000, 1500000000, 1 },
+	{ 500000000, 1000000000, 2 },
+	{ 250000000, 500000000, 3 },
+};
+
+/* 532MHz configuration: L = 0x1B (27) + ~0.708 fraction, x 19.2MHz XO */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+	.l = 0x1B,
+	.alpha = 0x55000000,
+	.alpha_hi = 0xB5,
+	.alpha_en_mask = BIT(24),
+	.main_output_mask = BIT(0),
+	.aux_output_mask = BIT(1),
+	.aux2_output_mask = BIT(2),
+	.config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.flags = SUPPORTS_DYNAMIC_UPDATE,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll0_out_aux2[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_aux2 = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpu_cc_pll0_out_aux2,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll0_out_aux2),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_pll0_out_aux2",
+		.parent_names = (const char *[]){ "gpu_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
+/* 640MHz configuration: L = 0x21 (33) + 1/3 fraction, x 19.2MHz XO */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+	.l = 0x21,
+	.alpha = 0x55555555,
+	.alpha_hi = 0x55,
+	.alpha_en_mask = BIT(24),
+	.vco_val = 0x2 << 20,
+	.vco_mask = GENMASK(21, 20),
+	.main_output_mask = BIT(0),
+	.aux_output_mask = BIT(1),
+	.config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+	.offset = 0x100,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.flags = SUPPORTS_DYNAMIC_UPDATE,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll1_out_aux[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll1_out_aux = {
+	.offset = 0x100,
+	.post_div_shift = 15,
+	.post_div_table = post_div_table_gpu_cc_pll1_out_aux,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll1_out_aux),
+	.width = 3,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_pll1_out_aux",
+		.parent_names = (const char *[]){ "gpu_cc_pll1" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
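+/* F(rate, source, pre-divider, M, N) frequency table entries */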
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+	F(200000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+	.cmd_rcgr = 0x1120,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_0,
+	.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gmu_clk_src",
+		.parent_names = gpu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+	F(320000000, P_GPU_CC_PLL1_OUT_AUX, 2, 0, 0),
+	F(465000000, P_GPU_CC_PLL1_OUT_AUX, 2, 0, 0),
+	F(600000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(745000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(820000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(900000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(950000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(980000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+	.cmd_rcgr = 0x101c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_1,
+	.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gx_gfx3d_clk_src",
+		.parent_names = gpu_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 320000000,
+			[VDD_LOW] = 465000000,
+			[VDD_LOW_L1] = 600000000,
+			[VDD_NOMINAL] = 745000000,
+			[VDD_NOMINAL_L1] = 820000000,
+			[VDD_HIGH] = 900000000,
+			[VDD_HIGH_L1] = 980000000},
+	},
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+	.halt_reg = 0x1078,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+	.halt_reg = 0x107c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x107c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_crc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+	.halt_reg = 0x10a4,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+	.halt_reg = 0x1098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gmu_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gmu_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+	.halt_reg = 0x108c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x108c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_snoc_dvm_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+	.halt_reg = 0x1004,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_aon_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+	.halt_reg = 0x109c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x109c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+	.halt_reg = 0x1060,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1060,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+	.halt_reg = 0x1054,
+	.halt_check = BRANCH_HALT_SKIP,
+	.clkr = {
+		.enable_reg = 0x1054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+	.halt_reg = 0x1090,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1090,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+	.halt_reg = 0x5000,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x5000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			 .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+			 .ops = &clk_branch2_ops,
+		},
+	},
+};
+
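+/* Clocks indexed by the identifiers in dt-bindings qcom,gpucc-bengal.h */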
+static struct clk_regmap *gpu_cc_bengal_clocks[] = {
+	[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+	[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+	[GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+	[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+	[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+	[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+	[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+	[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+	[GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+	[GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+	[GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+	[GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+	[GPU_CC_PLL0_OUT_AUX2] = &gpu_cc_pll0_out_aux2.clkr,
+	[GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+	[GPU_CC_PLL1_OUT_AUX] = &gpu_cc_pll1_out_aux.clkr,
+	[GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+	[GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+};
+
+static const struct regmap_config gpu_cc_bengal_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7008,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_bengal_desc = {
+	.config = &gpu_cc_bengal_regmap_config,
+	.clks = gpu_cc_bengal_clocks,
+	.num_clks = ARRAY_SIZE(gpu_cc_bengal_clocks),
+};
+
+static const struct of_device_id gpucc_bengal_match_table[] = {
+	{ .compatible = "qcom,bengal-gpucc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gpucc_bengal_match_table);
+
+static int gpucc_bengal_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	unsigned int value, mask;
+	int ret;
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	regmap = qcom_cc_map(pdev, &gpu_cc_bengal_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	clk_alpha_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+	clk_alpha_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+	/* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+	mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+	mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+	value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+	regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+			   mask, value);
+
+	ret = qcom_cc_really_probe(pdev, &gpu_cc_bengal_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GPUCC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GPUCC clocks\n");
+	return 0;
+}
+
+static struct platform_driver gpucc_bengal_driver = {
+	.probe = gpucc_bengal_probe,
+	.driver = {
+		.name = "bengal-gpucc",
+		.of_match_table = gpucc_bengal_match_table,
+	},
+};
+
+static int __init gpu_cc_bengal_init(void)
+{
+	return platform_driver_register(&gpucc_bengal_driver);
+}
+subsys_initcall(gpu_cc_bengal_init);
+
+static void __exit gpu_cc_bengal_exit(void)
+{
+	platform_driver_unregister(&gpucc_bengal_driver);
+}
+module_exit(gpu_cc_bengal_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC BENGAL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-lito.c b/drivers/clk/qcom/gpucc-lito.c
index 310d4bb..156fe4f 100644
--- a/drivers/clk/qcom/gpucc-lito.c
+++ b/drivers/clk/qcom/gpucc-lito.c
@@ -26,7 +26,7 @@
 #include "clk-regmap.h"
 #include "common.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
diff --git a/drivers/clk/qcom/npucc-lito.c b/drivers/clk/qcom/npucc-lito.c
index e2604f5..5a56ca9 100644
--- a/drivers/clk/qcom/npucc-lito.c
+++ b/drivers/clk/qcom/npucc-lito.c
@@ -26,7 +26,7 @@
 #include "clk-regmap.h"
 #include "common.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 
@@ -283,10 +283,11 @@ static struct clk_fixed_factor npu_cc_crc_div = {
 
 static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
 	F(200000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(518400000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(633600000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(825600000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(230000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(422000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(557000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(729000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(844000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	F(1000000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	{ }
 };
@@ -308,11 +309,12 @@ static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
 			[VDD_MIN] = 200000000,
-			[VDD_LOWER] = 300000000,
-			[VDD_LOW] = 518400000,
-			[VDD_LOW_L1] = 633600000,
-			[VDD_NOMINAL] = 825600000,
-			[VDD_HIGH] = 1000000000},
+			[VDD_LOWER] = 230000000,
+			[VDD_LOW] = 422000000,
+			[VDD_LOW_L1] = 557000000,
+			[VDD_NOMINAL] = 729000000,
+			[VDD_HIGH] = 844000000,
+			[VDD_HIGH_L1] = 1000000000},
 	},
 };
 
diff --git a/drivers/clk/qcom/vdd-level-lito.h b/drivers/clk/qcom/vdd-level-lito.h
new file mode 100644
index 0000000..b968de1
--- /dev/null
+++ b/drivers/clk/qcom/vdd-level-lito.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_LITO_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_LITO_H
+
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
+enum vdd_levels {
+	VDD_NONE,
+	VDD_MIN,		/* MIN SVS */
+	VDD_LOWER,		/* SVS2 */
+	VDD_LOW,		/* SVS */
+	VDD_LOW_L1,		/* SVSL1 */
+	VDD_NOMINAL,		/* NOM */
+	VDD_HIGH,		/* TURBO */
+	VDD_HIGH_L1,		/* TURBO_L1 */
+	VDD_NUM,
+};
+
+static int vdd_corner[] = {
+	[VDD_NONE]    = 0,
+	[VDD_MIN]     = RPMH_REGULATOR_LEVEL_MIN_SVS,
+	[VDD_LOWER]   = RPMH_REGULATOR_LEVEL_LOW_SVS,
+	[VDD_LOW]     = RPMH_REGULATOR_LEVEL_SVS,
+	[VDD_LOW_L1]  = RPMH_REGULATOR_LEVEL_SVS_L1,
+	[VDD_NOMINAL] = RPMH_REGULATOR_LEVEL_NOM,
+	[VDD_HIGH]    = RPMH_REGULATOR_LEVEL_TURBO,
+	[VDD_HIGH_L1] = RPMH_REGULATOR_LEVEL_TURBO_L1,
+};
+
+#endif
diff --git a/drivers/clk/qcom/videocc-lito.c b/drivers/clk/qcom/videocc-lito.c
index 6619bb4..03b63f8 100644
--- a/drivers/clk/qcom/videocc-lito.c
+++ b/drivers/clk/qcom/videocc-lito.c
@@ -28,7 +28,7 @@
 #include "common.h"
 #include "gdsc.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index f4b013e..24485be 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -535,17 +535,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
 	unsigned int reg = id / 32;
 	unsigned int bit = id % 32;
 	u32 bitmask = BIT(bit);
-	unsigned long flags;
-	u32 value;
 
 	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
 
 	/* Reset module */
-	spin_lock_irqsave(&priv->rmw_lock, flags);
-	value = readl(priv->base + SRCR(reg));
-	value |= bitmask;
-	writel(value, priv->base + SRCR(reg));
-	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+	writel(bitmask, priv->base + SRCR(reg));
 
 	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
 	udelay(35);
@@ -562,16 +556,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
 	unsigned int reg = id / 32;
 	unsigned int bit = id % 32;
 	u32 bitmask = BIT(bit);
-	unsigned long flags;
-	u32 value;
 
 	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
 
-	spin_lock_irqsave(&priv->rmw_lock, flags);
-	value = readl(priv->base + SRCR(reg));
-	value |= bitmask;
-	writel(value, priv->base + SRCR(reg));
-	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+	writel(bitmask, priv->base + SRCR(reg));
 	return 0;
 }
 
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 568f59b..e7c877d 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -37,7 +37,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
 	if (socfpgaclk->fixed_div) {
 		div = socfpgaclk->fixed_div;
 	} else {
-		if (!socfpgaclk->bypass_reg)
+		if (socfpgaclk->hw.reg)
 			div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
 	}
 
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 8789247..bad8099 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -2,6 +2,7 @@
 	tristate "Clock support for Spreadtrum SoCs"
 	depends on ARCH_SPRD || COMPILE_TEST
 	default ARCH_SPRD
+	select REGMAP_MMIO
 
 if SPRD_COMMON_CLK
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 995685f..ecaf191 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1900,8 +1900,10 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
 	ret = cpufreq_driver->fast_switch(policy, target_freq);
-	if (ret)
+	if (ret) {
 		cpufreq_times_record_transition(policy, ret);
+		cpufreq_stats_record_transition(policy, ret);
+	}
 
 	return ret;
 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1572129..21b919b 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -30,11 +30,12 @@ struct cpufreq_stats {
 static void cpufreq_stats_update(struct cpufreq_stats *stats)
 {
 	unsigned long long cur_time = get_jiffies_64();
+	unsigned long flags;
 
-	spin_lock(&cpufreq_stats_lock);
+	spin_lock_irqsave(&cpufreq_stats_lock, flags);
 	stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
 	stats->last_time = cur_time;
-	spin_unlock(&cpufreq_stats_lock);
+	spin_unlock_irqrestore(&cpufreq_stats_lock, flags);
 }
 
 static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
@@ -58,9 +59,6 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	ssize_t len = 0;
 	int i;
 
-	if (policy->fast_switch_enabled)
-		return 0;
-
 	cpufreq_stats_update(stats);
 	for (i = 0; i < stats->state_num; i++) {
 		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -84,9 +82,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 	ssize_t len = 0;
 	int i, j;
 
-	if (policy->fast_switch_enabled)
-		return 0;
-
 	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
 	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
 	for (i = 0; i < stats->state_num; i++) {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index c7710c1..a0620c9 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -145,11 +145,19 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	int err = -ENODEV;
 
 	cpu = of_get_cpu_node(policy->cpu, NULL);
-
-	of_node_put(cpu);
 	if (!cpu)
 		goto out;
 
+	max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+	of_node_put(cpu);
+	if (!max_freqp) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* we need the freq in kHz */
+	max_freq = *max_freqp / 1000;
+
 	dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
 	if (!dn)
 		dn = of_find_compatible_node(NULL, NULL,
@@ -185,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
-	max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-	if (!max_freqp) {
-		err = -EINVAL;
-		goto out_unmap_sdcpwr;
-	}
-
-	/* we need the freq in kHz */
-	max_freq = *max_freqp / 1000;
-
 	pr_debug("max clock-frequency is at %u kHz\n", max_freq);
 	pr_debug("initializing frequency table\n");
 
@@ -212,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
 
-out_unmap_sdcpwr:
-	iounmap(sdcpwr_mapbase);
-
 out_unmap_sdcasr:
 	iounmap(sdcasr_mapbase);
 out:
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ca1f0d7..e5dcb29 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -61,6 +61,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
 static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
 				   unsigned int authsize)
 {
+	switch (authsize) {
+	case 16:
+	case 15:
+	case 14:
+	case 13:
+	case 12:
+	case 8:
+	case 4:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -107,6 +120,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
 	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
 	INIT_LIST_HEAD(&rctx->cmd.entry);
 	rctx->cmd.engine = CCP_ENGINE_AES;
+	rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
 	rctx->cmd.u.aes.type = ctx->u.aes.type;
 	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
 	rctx->cmd.u.aes.action = encrypt;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index e212bad..1e2e421 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -625,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 
 	unsigned long long *final;
 	unsigned int dm_offset;
+	unsigned int authsize;
 	unsigned int jobid;
 	unsigned int ilen;
 	bool in_place = true; /* Default value */
@@ -646,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 	if (!aes->key) /* Gotta have a key SGL */
 		return -EINVAL;
 
+	/* Zero defaults to 16 bytes, the maximum size */
+	authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
+	switch (authsize) {
+	case 16:
+	case 15:
+	case 14:
+	case 13:
+	case 12:
+	case 8:
+	case 4:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	/* First, decompose the source buffer into AAD & PT,
 	 * and the destination buffer into AAD, CT & tag, or
 	 * the input into CT & tag.
@@ -660,7 +676,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
 	} else {
 		/* Input length for decryption includes tag */
-		ilen = aes->src_len - AES_BLOCK_SIZE;
+		ilen = aes->src_len - authsize;
 		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
 	}
 
@@ -769,8 +785,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 		while (src.sg_wa.bytes_left) {
 			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
 			if (!src.sg_wa.bytes_left) {
-				unsigned int nbytes = aes->src_len
-						      % AES_BLOCK_SIZE;
+				unsigned int nbytes = ilen % AES_BLOCK_SIZE;
 
 				if (nbytes) {
 					op.eom = 1;
@@ -842,19 +857,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 
 	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 		/* Put the ciphered tag after the ciphertext. */
-		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
 	} else {
 		/* Does this ciphered tag match the input? */
-		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
 					   DMA_BIDIRECTIONAL);
 		if (ret)
 			goto e_tag;
-		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
 		if (ret)
 			goto e_tag;
 
 		ret = crypto_memneq(tag.address, final_wa.address,
-				    AES_BLOCK_SIZE) ? -EBADMSG : 0;
+				    authsize) ? -EBADMSG : 0;
 		ccp_dm_free(&tag);
 	}
 
@@ -862,11 +877,11 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 	ccp_dm_free(&final_wa);
 
 e_dst:
-	if (aes->src_len && !in_place)
+	if (ilen > 0 && !in_place)
 		ccp_free_data(&dst, cmd_q);
 
 e_src:
-	if (aes->src_len)
+	if (ilen > 0)
 		ccp_free_data(&src, cmd_q);
 
 e_aad:
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 29edd09..86e47fe 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -262,7 +262,7 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
 		esoc_mdm_log(
 		"ESOC_FORCE_PWR_OFF: Queueing request: ESOC_REQ_SHUTDOWN\n");
 		esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
-		mdm_toggle_soft_reset(mdm, false);
+		mdm_power_down(mdm);
 		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
 		break;
 	case ESOC_RESET:
@@ -484,7 +484,7 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
 		mdm->ready = false;
 		esoc_mdm_log(
 		"ESOC_PRIMARY_REBOOT: Powering down the modem\n");
-		mdm_toggle_soft_reset(mdm, false);
+		mdm_power_down(mdm);
 		break;
 	};
 }
@@ -1081,26 +1081,27 @@ static int sdx55m_setup_hw(struct mdm_ctrl *mdm,
 		dev_err(mdm->dev, "Failed to parse DT gpios\n");
 		goto err_destroy_wrkq;
 	}
+	if (!of_property_read_bool(node, "qcom,esoc-spmi-soft-reset")) {
+		ret = mdm_pon_dt_init(mdm);
+		if (ret) {
+			esoc_mdm_log("Failed to parse PON DT gpios\n");
+			dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
+			goto err_destroy_wrkq;
+		}
 
-	ret = mdm_pon_dt_init(mdm);
-	if (ret) {
-		esoc_mdm_log("Failed to parse PON DT gpios\n");
-		dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
-		goto err_destroy_wrkq;
+		ret = mdm_pon_setup(mdm);
+		if (ret) {
+			esoc_mdm_log("Failed to setup PON\n");
+			dev_err(mdm->dev, "Failed to setup PON\n");
+			goto err_destroy_wrkq;
+		}
 	}
 
 	ret = mdm_pinctrl_init(mdm);
 	if (ret) {
 		esoc_mdm_log("Failed to init pinctrl\n");
 		dev_err(mdm->dev, "Failed to init pinctrl\n");
-		goto err_destroy_wrkq;
-	}
-
-	ret = mdm_pon_setup(mdm);
-	if (ret) {
-		esoc_mdm_log("Failed to setup PON\n");
-		dev_err(mdm->dev, "Failed to setup PON\n");
-		goto err_destroy_wrkq;
+		goto err_release_ipc;
 	}
 
 	ret = mdm_configure_ipc(mdm, pdev);
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 3854f25..c90b81e 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -183,6 +183,8 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
 			"Modem in crash state or not booted. Ignoring.\n");
 			return;
 		}
+		esoc_mdm_log("Setting crash flag\n");
+		mdm_drv->mode = CRASH;
 		queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
 		break;
 	case ESOC_REQ_ENG_ON:
@@ -209,8 +211,7 @@ static void mdm_ssr_fn(struct work_struct *work)
 
 	esoc_client_link_mdm_crash(mdm_drv->esoc_clink);
 	mdm_wait_for_status_low(mdm, false);
-	esoc_mdm_log("Starting SSR work and setting crash state\n");
-	mdm_drv->mode = CRASH;
+	esoc_mdm_log("Starting SSR work\n");
 
 	/*
 	 * If restarting esoc fails, the SSR framework triggers a kernel panic
@@ -293,12 +294,14 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
 	 container_of(crashed_subsys, struct esoc_clink, subsys);
 	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
 	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+	struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
 
 	esoc_mdm_log("Shutdown request from SSR\n");
 
 	mutex_lock(&mdm_drv->poff_lock);
 	if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
 		esoc_mdm_log("Shutdown in crash mode\n");
+		mdm_wait_for_status_low(mdm, false);
 		if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG)) {
 			/* We want to mask debug command.
 			 * In this case return success
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index abde8c7..1dfff3a 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -206,6 +206,12 @@ static int sdx50m_power_down(struct mdm_ctrl *mdm)
 	return 0;
 }
 
+static int sdx55m_power_down(struct mdm_ctrl *mdm)
+{
+	esoc_mdm_log("Performing warm reset as cold reset is not supported\n");
+	return sdx55m_toggle_soft_reset(mdm, false);
+}
+
 static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
 {
 	dev_dbg(mdm->dev, "Triggering mdm cold reset");
@@ -318,6 +324,7 @@ struct mdm_pon_ops sdx50m_pon_ops = {
 struct mdm_pon_ops sdx55m_pon_ops = {
 	.pon = mdm4x_do_first_power_on,
 	.soft_reset = sdx55m_toggle_soft_reset,
+	.poff_force = sdx55m_power_down,
 	.dt_init = mdm4x_pon_dt_init,
 	.setup = mdm4x_pon_setup,
 };
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6da80f3..6da0ab4 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -198,7 +198,7 @@
 
 config ISCSI_IBFT_FIND
 	bool "iSCSI Boot Firmware Table Attributes"
-	depends on X86 && ACPI
+	depends on X86 && ISCSI_IBFT
 	default n
 	help
 	  This option enables the kernel to find the region of memory
@@ -209,7 +209,8 @@
 config ISCSI_IBFT
 	tristate "iSCSI Boot Firmware Table Attributes module"
 	select ISCSI_BOOT_SYSFS
-	depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+	select ISCSI_IBFT_FIND if X86
+	depends on ACPI && SCSI && SCSI_LOWLEVEL
 	default	n
 	help
 	  This option enables support for detection and exposing of iSCSI
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c51462f..966aef3 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBFT_ISCSI_VERSION);
 
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
 struct ibft_hdr {
 	u8 id;
 	u8 version;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3afb621..58ba5aa 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1082,9 +1082,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
 			lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
 		if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
-			lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+			lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+					   GPIOLINE_FLAG_IS_OUT);
 		if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
-			lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+			lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+					   GPIOLINE_FLAG_IS_OUT);
 
 		if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
 			return -EFAULT;
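
Seen from userspace, the effect of the gpiolib change is that open-drain and
open-source lines now also report as outputs. A minimal sketch against the v1
character-device uAPI (chip_fd is assumed to be an open /dev/gpiochipN
descriptor; line_is_output() is illustrative):

	#include <sys/ioctl.h>
	#include <linux/gpio.h>

	/* Returns 1 if the line reports as an output, 0 if not, -1 on error */
	static int line_is_output(int chip_fd, unsigned int offset)
	{
		struct gpioline_info info = { .line_offset = offset };

		if (ioctl(chip_fd, GPIO_GET_LINEINFO_IOCTL, &info) < 0)
			return -1;

		return !!(info.flags & GPIOLINE_FLAG_IS_OUT);
	}
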
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f5fb937..65cecfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
 	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
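
A note on the allocator swap above: kcalloc() is kmalloc_array() plus
__GFP_ZERO, so any of the 1024 words the register read loop does not
overwrite are returned to the debugfs reader as zeroes rather than stale
heap contents. The equivalent spelling:

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL | __GFP_ZERO);
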
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e3f5e5d..f4b89d1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -462,8 +462,10 @@ void dc_link_set_test_pattern(struct dc_link *link,
 
 static void destruct(struct dc *dc)
 {
-	dc_release_state(dc->current_state);
-	dc->current_state = NULL;
+	if (dc->current_state) {
+		dc_release_state(dc->current_state);
+		dc->current_state = NULL;
+	}
 
 	destroy_links(dc);
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index e0a96ab..f0d68aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -222,7 +222,7 @@ bool resource_construct(
 		 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
 		 */
 		update_num_audio(&straps, &num_audio, &pool->audio_support);
-		for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+		for (i = 0; i < caps->num_audio; i++) {
 			struct audio *aud = create_funcs->create_audio(ctx, i);
 
 			if (aud == NULL) {
@@ -1713,6 +1713,12 @@ static struct audio *find_first_free_audio(
 			return pool->audios[i];
 		}
 	}
+
+	/* use engine id to find free audio */
+	if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+		return pool->audios[id];
+	}
+
 	/*not found the matching one, first come first serve*/
 	for (i = 0; i < pool->audio_count; i++) {
 		if (res_ctx->is_audio_acquired[i] == false) {
@@ -1866,6 +1872,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
 		pix_clk /= 2;
 	if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
 		switch (timing->display_color_depth) {
+		case COLOR_DEPTH_666:
 		case COLOR_DEPTH_888:
 			normalized_pix_clk = pix_clk;
 			break;
@@ -1949,7 +1956,7 @@ enum dc_status resource_map_pool_resources(
 	/* TODO: Add check if ASIC support and EDID audio */
 	if (!stream->sink->converter_disable_audio &&
 	    dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
-	    stream->audio_info.mode_count) {
+	    stream->audio_info.mode_count && stream->audio_info.flags.all) {
 		pipe_ctx->stream_res.audio = find_first_free_audio(
 		&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 070ab56..da8b198 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -242,6 +242,10 @@ static void dmcu_set_backlight_level(
 	s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
 
 	REG_WRITE(BIOS_SCRATCH_2, s2);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+			0, 1, 80000);
 }
 
 static void dce_abm_init(struct abm *abm)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c0b9ca1..f4469fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -159,7 +159,7 @@ struct resource_pool {
 	struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
 	unsigned int clk_src_count;
 
-	struct audio *audios[MAX_PIPES];
+	struct audio *audios[MAX_AUDIOS];
 	unsigned int audio_count;
 	struct audio_support audio_support;
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index cf7433e..7190174 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -34,6 +34,7 @@
  * Data types shared between different Virtual HW blocks
  ******************************************************************************/
 
+#define MAX_AUDIOS 7
 #define MAX_PIPES 6
 
 struct gamma_curve {
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index bf6cad6..7a3e5a8 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -46,6 +46,7 @@
 config DRM_LVDS_ENCODER
 	tristate "Transparent parallel to LVDS encoder support"
 	depends on OF
+	select DRM_KMS_HELPER
 	select DRM_PANEL_BRIDGE
 	help
 	  Support for transparent parallel to LVDS encoders that don't require
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 77b1800..2fefca4 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -794,7 +794,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
 	struct drm_device *dev = fb->dev;
 	struct drm_atomic_state *state;
 	struct drm_plane *plane;
-	struct drm_connector *conn;
+	struct drm_connector *conn __maybe_unused;
 	struct drm_connector_state *conn_state;
 	int i, ret;
 	unsigned plane_mask;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 0ddb6ee..df22843 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
 	scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
 	do {
 		cpu_relax();
-	} while (retry > 1 &&
+	} while (--retry > 1 &&
 		 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
 	do {
 		cpu_relax();
 		scaler_write(1, SCALER_INT_EN);
-	} while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+	} while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
 
 	return retry ? 0 : -EIO;
 }
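
The exynos fix repairs polling loops that could never terminate: without the
pre-decrement, retry was never modified, so a stuck SOFT_RESET bit spun
forever and the trailing "return retry ? 0 : -EIO;" could never report a
timeout. Reduced illustration, assuming retry starts at a positive count set
earlier in the function (hw_busy() is a stand-in for the scaler_read()
checks):

	int retry = 1000;

	/* old: retry never changes, so this loops forever while busy */
	do {
		cpu_relax();
	} while (retry > 1 && hw_busy());

	/* fixed: --retry bounds the loop; retry reaching 0 maps to -EIO */
	do {
		cpu_relax();
	} while (--retry > 1 && hw_busy());
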
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index a132a80..77df790 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -413,8 +413,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
 	else
 		txesc2_div = 10;
 
-	I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
-	I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
+	I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+	I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
 }
 
 /* Program BXT Mipi clocks and dividers */
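
The i915 change writes 2^(div - 1) instead of the raw divisor, which implies
these GLK register fields expect a one-hot divider selection. A sketch of the
helper this implies (glk_txesc_field() is illustrative, not an i915 function):

	static u32 glk_txesc_field(u32 div)
	{
		/* div 2 -> 0x002, div 8 -> 0x080, div 10 -> 0x200 */
		return (1u << (div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK;
	}
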
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 89bd242..610139b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1298,7 +1298,8 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	drm_of_component_match_add(dev, matchptr, compare_of, np);
+	if (of_device_is_available(np))
+		drm_of_component_match_add(dev, matchptr, compare_of, np);
 
 	of_node_put(np);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404..a11637b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		u8 *ptr = msg->buf;
 
 		while (remaining) {
-			u8 cnt = (remaining > 16) ? 16 : remaining;
-			u8 cmd;
+			u8 cnt, retries, cmd;
 
 			if (msg->flags & I2C_M_RD)
 				cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 			if (mcnt || remaining > 16)
 				cmd |= 4; /* MOT */
 
-			ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
-			if (ret < 0) {
-				nvkm_i2c_aux_release(aux);
-				return ret;
+			for (retries = 0, cnt = 0;
+			     retries < 32 && !cnt;
+			     retries++) {
+				cnt = min_t(u8, remaining, 16);
+				ret = aux->func->xfer(aux, true, cmd,
+						      msg->addr, ptr, &cnt);
+				if (ret < 0)
+					goto out;
+			}
+			if (!cnt) {
+				AUX_TRACE(aux, "no data after 32 retries");
+				ret = -EIO;
+				goto out;
 			}
 
 			ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		msg++;
 	}
 
+	ret = num;
+out:
 	nvkm_i2c_aux_release(aux);
-	return num;
+	return ret;
 }
 
 static u32
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 080f053..6a4da3a 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -436,7 +436,7 @@ static int rockchip_dp_resume(struct device *dev)
 
 static const struct dev_pm_ops rockchip_dp_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
-	.suspend = rockchip_dp_suspend,
+	.suspend_late = rockchip_dp_suspend,
 	.resume_early = rockchip_dp_resume,
 #endif
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e4e09d4..59e9d05 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 		break;
 	}
 
-	if (retries == RETRIES)
+	if (retries == RETRIES) {
+		kfree(reply);
 		return -EINVAL;
+	}
 
 	*msg_len = reply_len;
 	*msg     = reply;
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 5cb4cbb..481cd3b 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -194,7 +194,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v2 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -220,7 +219,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v3 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -286,7 +284,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a505 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -306,7 +303,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a506 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -384,7 +380,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a510 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_256K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -510,7 +505,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a540v2 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -593,7 +587,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a512 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_256K + SZ_16K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -612,7 +605,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a508 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -740,6 +732,43 @@ static const struct adreno_reglist a630_vbif_regs[] = {
 	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
 };
 
+
+/* For a615, a616, a618, a630, a640 and a680 */
+static const struct a6xx_protected_regs a630_protected_regs[] = {
+	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
+	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
+	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
+	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
+	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
+	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
+	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
+	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
+	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
+	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
+	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
+	{ A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
+	{ A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
+	{ A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
+	{ A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
+	{ A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
+	{ A6XX_CP_PROTECT_REG + 19, 0x09624, 0x097ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 20, 0x09e70, 0x09e71, 1 },
+	{ A6XX_CP_PROTECT_REG + 21, 0x09e78, 0x09fff, 1 },
+	{ A6XX_CP_PROTECT_REG + 22, 0x0a630, 0x0a7ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 23, 0x0ae02, 0x0ae02, 1 },
+	{ A6XX_CP_PROTECT_REG + 24, 0x0ae50, 0x0b17f, 1 },
+	{ A6XX_CP_PROTECT_REG + 25, 0x0b604, 0x0b604, 1 },
+	{ A6XX_CP_PROTECT_REG + 26, 0x0be02, 0x0be03, 1 },
+	{ A6XX_CP_PROTECT_REG + 27, 0x0be20, 0x0de1f, 1 },
+	{ A6XX_CP_PROTECT_REG + 28, 0x0f000, 0x0fbff, 1 },
+	{ A6XX_CP_PROTECT_REG + 29, 0x0fc00, 0x11bff, 0 },
+	{ A6XX_CP_PROTECT_REG + 31, 0x11c00, 0x00000, 1 },
+	{ 0 },
+};
+
 static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A630, 6, 3, 0, ANY_ID),
@@ -749,7 +778,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -764,6 +792,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 	.hwcg_count = ARRAY_SIZE(a630_hwcg_regs),
 	.vbif = a630_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a630_vbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 /* For a615, a616 and a618 */
@@ -847,7 +877,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -862,6 +891,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
 	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
@@ -873,7 +904,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -888,6 +918,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
 	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_reglist a620_hwcg_regs[] = {
@@ -951,6 +983,44 @@ static const struct adreno_reglist a650_gbif_regs[] = {
 	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
 };
 
+/* These are for a620 and a650 */
+static const struct a6xx_protected_regs a620_protected_regs[] = {
+	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
+	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
+	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
+	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
+	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
+	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
+	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
+	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
+	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
+	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
+	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
+	{ A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
+	{ A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
+	{ A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
+	{ A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
+	{ A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
+	{ A6XX_CP_PROTECT_REG + 19, 0x08e80, 0x090ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 20, 0x09624, 0x097ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 21, 0x09e60, 0x09e71, 1 },
+	{ A6XX_CP_PROTECT_REG + 22, 0x09e78, 0x09fff, 1 },
+	{ A6XX_CP_PROTECT_REG + 23, 0x0a630, 0x0a7ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 24, 0x0ae02, 0x0ae02, 1 },
+	{ A6XX_CP_PROTECT_REG + 25, 0x0ae50, 0x0b17f, 1 },
+	{ A6XX_CP_PROTECT_REG + 26, 0x0b604, 0x0b604, 1 },
+	{ A6XX_CP_PROTECT_REG + 27, 0x0b608, 0x0b60f, 1 },
+	{ A6XX_CP_PROTECT_REG + 28, 0x0be02, 0x0be03, 1 },
+	{ A6XX_CP_PROTECT_REG + 29, 0x0be20, 0x0de1f, 1 },
+	{ A6XX_CP_PROTECT_REG + 30, 0x0f000, 0x0fbff, 1 },
+	{ A6XX_CP_PROTECT_REG + 31, 0x0fc00, 0x11bff, 0 },
+	{ A6XX_CP_PROTECT_REG + 47, 0x11c00, 0x00000, 1 },
+	{ 0 },
+};
+
 static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
 	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A620, 6, 2, 0, 0),
@@ -960,7 +1030,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x30,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -976,6 +1045,9 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
 	.vbif = a650_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
 	.veto_fal10 = true,
+	.hang_detect_cycles = 0x3ffff,
+	.protected_regs = a620_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_reglist a640_hwcg_regs[] = {
@@ -1030,7 +1102,7 @@ static const struct adreno_reglist a640_hwcg_regs[] = {
 	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
 };
 
-/* These apply to a640, a680 and a612 */
+/* These apply to a640, a680, a612 and a610 */
 static const struct adreno_reglist a640_vbif_regs[] = {
 	{A6XX_GBIF_QSB_SIDE0, 0x00071620},
 	{A6XX_GBIF_QSB_SIDE1, 0x00071620},
@@ -1048,7 +1120,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M, //Verified 1MB
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1063,6 +1134,9 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
 	.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_reglist a650_hwcg_regs[] = {
@@ -1119,14 +1193,13 @@ static const struct adreno_reglist a650_hwcg_regs[] = {
 
 static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 	{
-		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
+		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, 0),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IFPC,
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
-		.num_protected_regs = 0x30,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1141,6 +1214,37 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
 	.veto_fal10 = true,
 	.pdc_in_aop = true,
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a620_protected_regs,
+	.disable_tseskip = true,
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
+	{
+		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
+		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
+			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
+			ADRENO_IFPC | ADRENO_PREEMPTION,
+		.gpudev = &adreno_a6xx_gpudev,
+		.gmem_base = 0,
+		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
+	},
+	.prim_fifo_threshold = 0x00300000,
+	.pdc_address_offset = 0x000300A0,
+	.gmu_major = 2,
+	.gmu_minor = 0,
+	.sqefw_name = "a650_sqe.fw",
+	.gmufw_name = "a650_gmu.bin",
+	.zap_name = "a650_zap",
+	.vbif = a650_gbif_regs,
+	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
+	.veto_fal10 = true,
+	.pdc_in_aop = true,
+	.hang_detect_cycles = 0x3ffff,
+	.protected_regs = a620_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
@@ -1150,7 +1254,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_2M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1165,6 +1268,9 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
 	.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_reglist a612_hwcg_regs[] = {
@@ -1221,11 +1327,10 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A612, 6, 1, 2, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IOCOHERENT | ADRENO_PREEMPTION | ADRENO_GPMU |
-			ADRENO_IFPC | ADRENO_PERFCTRL_RETAIN,
+			ADRENO_IFPC,
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_4K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1238,6 +1343,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
 	.hwcg_count = ARRAY_SIZE(a612_hwcg_regs),
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
@@ -1249,7 +1356,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1264,6 +1370,30 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
 	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a610 = {
+	{
+		DEFINE_ADRENO_REV(ADRENO_REV_A610, 6, 1, 0, ANY_ID),
+		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
+			ADRENO_PREEMPTION,
+		.gpudev = &adreno_a6xx_gpudev,
+		.gmem_base = 0x100000,
+		.gmem_size = (SZ_128K + SZ_4K),
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
+	},
+	.prim_fifo_threshold = 0x00080000,
+	.sqefw_name = "a630_sqe.fw",
+	.zap_name = "a610_zap",
+	.hwcg = a612_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a612_hwcg_regs),
+	.vbif = a640_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+	.hang_detect_cycles = 0x3ffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_gpu_core *adreno_gpulist[] = {
@@ -1291,7 +1421,9 @@ static const struct adreno_gpu_core *adreno_gpulist[] = {
 	&adreno_gpu_core_a620.base,
 	&adreno_gpu_core_a640.base,
 	&adreno_gpu_core_a650.base,
+	&adreno_gpu_core_a650v2.base,
 	&adreno_gpu_core_a680.base,
 	&adreno_gpu_core_a612.base,
 	&adreno_gpu_core_a616.base,
+	&adreno_gpu_core_a610.base,
 };
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index a53171e..aa946dc 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -947,6 +947,9 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
 			&level->gpu_freq))
 			return -EINVAL;
 
+		of_property_read_u32(child, "qcom,acd-level",
+			&level->acd_level);
+
 		ret = _of_property_read_ddrtype(child,
 			"qcom,bus-freq", &level->bus_freq);
 		if (ret) {
@@ -2244,10 +2247,13 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
 	 * needs a reset too) and also for below gpu
 	 * A304: It can't do SMMU programming of any kind after a soft reset
 	 * A612: IPC protocol between RGMU and CP will not restart after reset
+	 * A610: A cross-chip issue with the reset line in all 11nm chips;
+	 * soft reset is therefore not recommended
 	 */
 
 	if ((fault & ADRENO_IOMMU_PAGE_FAULT) || adreno_is_a304(adreno_dev) ||
-			adreno_is_a612(adreno_dev))
+			adreno_is_a612(adreno_dev) ||
+			adreno_is_a610(adreno_dev))
 		return false;
 
 	return true;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index e152bed..3fea1f0 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -96,8 +96,6 @@
 #define ADRENO_MIN_VOLT BIT(15)
 /* The core supports IO-coherent memory */
 #define ADRENO_IOCOHERENT BIT(16)
-/* To retain RBBM perfcntl enable setting in IFPC */
-#define ADRENO_PERFCTRL_RETAIN BIT(17)
 /*
  * The GMU supports Adaptive Clock Distribution (ACD)
  * for droop mitigation
@@ -195,6 +193,7 @@ enum adreno_gpurev {
 	ADRENO_REV_A512 = 512,
 	ADRENO_REV_A530 = 530,
 	ADRENO_REV_A540 = 540,
+	ADRENO_REV_A610 = 610,
 	ADRENO_REV_A612 = 612,
 	ADRENO_REV_A615 = 615,
 	ADRENO_REV_A616 = 616,
@@ -352,7 +351,6 @@ struct adreno_reglist {
  * @gpudev: Pointer to the GPU family specific functions for this core
  * @gmem_base: Base address of binning memory (GMEM/OCMEM)
  * @gmem_size: Amount of binning memory (GMEM/OCMEM) to reserve for the core
- * @num_protected_regs: number of protected registers
  * @busy_mask: mask to check if GPU is busy in RBBM_STATUS
  * @bus_width: Bytes transferred in 1 cycle
  */
@@ -363,7 +361,6 @@ struct adreno_gpu_core {
 	struct adreno_gpudev *gpudev;
 	unsigned long gmem_base;
 	size_t gmem_size;
-	unsigned int num_protected_regs;
 	unsigned int busy_mask;
 	u32 bus_width;
 };
@@ -927,7 +924,7 @@ struct adreno_gpudev {
 	void (*gpu_keepalive)(struct adreno_device *adreno_dev,
 			bool state);
 	bool (*hw_isidle)(struct adreno_device *adreno_dev);
-	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
+	const char *(*iommu_fault_block)(struct kgsl_device *device,
 				unsigned int fsynr1);
 	int (*reset)(struct kgsl_device *device, int fault);
 	int (*soft_reset)(struct adreno_device *adreno_dev);
@@ -1160,6 +1157,7 @@ static inline int adreno_is_a6xx(struct adreno_device *adreno_dev)
 			ADRENO_GPUREV(adreno_dev) < 700;
 }
 
+ADRENO_TARGET(a610, ADRENO_REV_A610)
 ADRENO_TARGET(a612, ADRENO_REV_A612)
 ADRENO_TARGET(a618, ADRENO_REV_A618)
 ADRENO_TARGET(a620, ADRENO_REV_A620)
@@ -1457,43 +1455,6 @@ static inline void adreno_put_gpu_halt(struct adreno_device *adreno_dev)
 void adreno_reglist_write(struct adreno_device *adreno_dev,
 		const struct adreno_reglist *list, u32 count);
 
-/**
- * adreno_set_protected_registers() - Protect the specified range of registers
- * from being accessed by the GPU
- * @adreno_dev: pointer to the Adreno device
- * @index: Pointer to the index of the protect mode register to write to
- * @reg: Starting dword register to write
- * @mask_len: Size of the mask to protect (# of registers = 2 ** mask_len)
- *
- * Add the range of registers to the list of protected mode registers that will
- * cause an exception if the GPU accesses them.  There are 16 available
- * protected mode registers.  Index is used to specify which register to write
- * to - the intent is to call this function multiple times with the same index
- * pointer for each range and the registers will be magically programmed in
- * incremental fashion
- */
-static inline void adreno_set_protected_registers(
-		struct adreno_device *adreno_dev, unsigned int *index,
-		unsigned int reg, int mask_len)
-{
-	unsigned int val;
-	unsigned int base =
-		adreno_getreg(adreno_dev, ADRENO_REG_CP_PROTECT_REG_0);
-	unsigned int offset = *index;
-	unsigned int max_slots = adreno_dev->gpucore->num_protected_regs ?
-				adreno_dev->gpucore->num_protected_regs : 16;
-
-	/* Do we have a free slot? */
-	if (WARN(*index >= max_slots, "Protected register slots full: %d/%d\n",
-					*index, max_slots))
-		return;
-
-	val = 0x60000000 | ((mask_len & 0x1F) << 24) | ((reg << 2) & 0xFFFFF);
-
-	kgsl_regwrite(KGSL_DEVICE(adreno_dev), base + offset, val);
-	*index = *index + 1;
-}
-
 #ifdef CONFIG_DEBUG_FS
 void adreno_debugfs_init(struct adreno_device *adreno_dev);
 void adreno_context_debugfs_init(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 71306b4..680afa0 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1070,49 +1070,48 @@ static void a3xx_perfcounter_init(struct adreno_device *adreno_dev)
 	}
 }
 
-/**
- * a3xx_protect_init() - Initializes register protection on a3xx
- * @adreno_dev: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
-static void a3xx_protect_init(struct adreno_device *adreno_dev)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	int index = 0;
-	struct kgsl_protected_registers *iommu_regs;
+static const struct {
+	u32 reg;
+	u32 base;
+	u32 count;
+} a3xx_protected_blocks[] = {
+	/* RBBM */
+	{ A3XX_CP_PROTECT_REG_0,      0x0018, 0 },
+	{ A3XX_CP_PROTECT_REG_0 + 1,  0x0020, 2 },
+	{ A3XX_CP_PROTECT_REG_0 + 2,  0x0033, 0 },
+	{ A3XX_CP_PROTECT_REG_0 + 3,  0x0042, 0 },
+	{ A3XX_CP_PROTECT_REG_0 + 4,  0x0050, 4 },
+	{ A3XX_CP_PROTECT_REG_0 + 5,  0x0063, 0 },
+	{ A3XX_CP_PROTECT_REG_0 + 6,  0x0100, 4 },
+	/* CP */
+	{ A3XX_CP_PROTECT_REG_0 + 7,  0x01c0, 5 },
+	{ A3XX_CP_PROTECT_REG_0 + 8,  0x01ec, 1 },
+	{ A3XX_CP_PROTECT_REG_0 + 9,  0x01f6, 1 },
+	{ A3XX_CP_PROTECT_REG_0 + 10, 0x01f8, 2 },
+	{ A3XX_CP_PROTECT_REG_0 + 11, 0x045e, 2 },
+	{ A3XX_CP_PROTECT_REG_0 + 12, 0x0460, 4 },
+	/* RB */
+	{ A3XX_CP_PROTECT_REG_0 + 13, 0x0cc0, 0 },
+	/* VBIF */
+	{ A3XX_CP_PROTECT_REG_0 + 14, 0x3000, 6 },
+	/* SMMU */
+	{ A3XX_CP_PROTECT_REG_0 + 15, 0xa000, 12 },
+	/* There are no remaining protected mode registers for a3xx */
+};
 
-	/* enable access protection to privileged registers */
+static void a3xx_protect_init(struct kgsl_device *device)
+{
+	int i;
+
 	kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
 
-	/* RBBM registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x18, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0x20, 2);
-	adreno_set_protected_registers(adreno_dev, &index, 0x33, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0x42, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0x50, 4);
-	adreno_set_protected_registers(adreno_dev, &index, 0x63, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0x100, 4);
+	for (i = 0; i < ARRAY_SIZE(a3xx_protected_blocks); i++) {
+		u32 val = 0x60000000 |
+			(a3xx_protected_blocks[i].count << 24) |
+			(a3xx_protected_blocks[i].base << 2);
 
-	/* CP registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x1C0, 5);
-	adreno_set_protected_registers(adreno_dev, &index, 0x1EC, 1);
-	adreno_set_protected_registers(adreno_dev, &index, 0x1F6, 1);
-	adreno_set_protected_registers(adreno_dev, &index, 0x1F8, 2);
-	adreno_set_protected_registers(adreno_dev, &index, 0x45E, 2);
-	adreno_set_protected_registers(adreno_dev, &index, 0x460, 4);
-
-	/* RB registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
-
-	/* VBIF registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x3000, 6);
-
-	/* SMMU registers */
-	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
-	if (iommu_regs)
-		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, ilog2(iommu_regs->range));
+		kgsl_regwrite(device, a3xx_protected_blocks[i].reg, val);
+	}
 }
 
 static void a3xx_start(struct adreno_device *adreno_dev)
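
For clarity, in the a3xx table above `count` is a log2 mask length, so an
entry covers 2^count registers. Worked example for the SMMU entry
{ A3XX_CP_PROTECT_REG_0 + 15, 0xa000, 12 }:

	/* protects 2^12 = 4096 registers starting at dword offset 0xa000 */
	u32 val = 0x60000000 | (12 << 24) | (0xa000 << 2);	/* 0x6c028000 */
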
@@ -1161,7 +1160,7 @@ static void a3xx_start(struct adreno_device *adreno_dev)
 	kgsl_regwrite(device, A3XX_RBBM_CLOCK_CTL, A3XX_RBBM_CLOCK_CTL_DEFAULT);
 
 	/* Turn on protection */
-	a3xx_protect_init(adreno_dev);
+	a3xx_protect_init(device);
 
 	/* Turn on performance counters */
 	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index a678866..12f5c21 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -293,57 +293,69 @@ static void a5xx_remove(struct adreno_device *adreno_dev)
 		a5xx_critical_packet_destroy(adreno_dev);
 }
 
-/**
- * a5xx_protect_init() - Initializes register protection on a5xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
+static const struct {
+	u32 reg;
+	u32 base;
+	u32 count;
+} a5xx_protected_blocks[] = {
+	/* RBBM */
+	{  A5XX_CP_PROTECT_REG_0,     0x004, 2 },
+	{  A5XX_CP_PROTECT_REG_0 + 1, 0x008, 3 },
+	{  A5XX_CP_PROTECT_REG_0 + 2, 0x010, 4 },
+	{  A5XX_CP_PROTECT_REG_0 + 3, 0x020, 5 },
+	{  A5XX_CP_PROTECT_REG_0 + 4, 0x040, 6 },
+	{  A5XX_CP_PROTECT_REG_0 + 5, 0x080, 6 },
+	/* Content protection */
+	{  A5XX_CP_PROTECT_REG_0 + 6, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4 },
+	{  A5XX_CP_PROTECT_REG_0 + 7, A5XX_RBBM_SECVID_TRUST_CNTL, 1 },
+	/* CP */
+	{  A5XX_CP_PROTECT_REG_0 + 8, 0x800, 6 },
+	{  A5XX_CP_PROTECT_REG_0 + 9, 0x840, 3 },
+	{  A5XX_CP_PROTECT_REG_0 + 10, 0x880, 5 },
+	{  A5XX_CP_PROTECT_REG_0 + 11, 0xaa0, 0 },
+	/* RB */
+	{  A5XX_CP_PROTECT_REG_0 + 12, 0xcc0, 0 },
+	{  A5XX_CP_PROTECT_REG_0 + 13, 0xcf0, 1 },
+	/* VPC */
+	{  A5XX_CP_PROTECT_REG_0 + 14, 0xe68, 3 },
+	{  A5XX_CP_PROTECT_REG_0 + 15, 0xe70, 4 },
+	/* UCHE */
+	{  A5XX_CP_PROTECT_REG_0 + 16, 0xe80, 4 },
+	/* A5XX_CP_PROTECT_REG_17 will be used for SMMU */
+	/* A5XX_CP_PROTECT_REG_18 - A5XX_CP_PROTECT_REG_31 are available */
+};
+
+static void _setprotectreg(struct kgsl_device *device, u32 offset,
+		u32 base, u32 count)
+{
+	kgsl_regwrite(device, offset, 0x60000000 | (count << 24) | (base << 2));
+}
+
 static void a5xx_protect_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	int index = 0;
-	struct kgsl_protected_registers *iommu_regs;
+	u32 reg;
+	int i;
 
 	/* enable access protection to privileged registers */
 	kgsl_regwrite(device, A5XX_CP_PROTECT_CNTL, 0x00000007);
 
-	/* RBBM registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x4, 2);
-	adreno_set_protected_registers(adreno_dev, &index, 0x8, 3);
-	adreno_set_protected_registers(adreno_dev, &index, 0x10, 4);
-	adreno_set_protected_registers(adreno_dev, &index, 0x20, 5);
-	adreno_set_protected_registers(adreno_dev, &index, 0x40, 6);
-	adreno_set_protected_registers(adreno_dev, &index, 0x80, 6);
+	for (i = 0; i < ARRAY_SIZE(a5xx_protected_blocks); i++) {
+		reg = a5xx_protected_blocks[i].reg;
 
-	/* Content protection registers */
-	adreno_set_protected_registers(adreno_dev, &index,
-		   A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4);
-	adreno_set_protected_registers(adreno_dev, &index,
-		   A5XX_RBBM_SECVID_TRUST_CNTL, 1);
+		_setprotectreg(device, reg, a5xx_protected_blocks[i].base,
+			a5xx_protected_blocks[i].count);
+	}
 
-	/* CP registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x800, 6);
-	adreno_set_protected_registers(adreno_dev, &index, 0x840, 3);
-	adreno_set_protected_registers(adreno_dev, &index, 0x880, 5);
-	adreno_set_protected_registers(adreno_dev, &index, 0x0AA0, 0);
-
-	/* RB registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0xCF0, 1);
-
-	/* VPC registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0xE68, 3);
-	adreno_set_protected_registers(adreno_dev, &index, 0xE70, 4);
-
-	/* UCHE registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0xE80, ilog2(16));
-
-	/* SMMU registers */
-	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
-	if (iommu_regs)
-		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, ilog2(iommu_regs->range));
+	/*
+	 * For a530 and a540 the SMMU region is 0x20000 bytes long and 0x10000
+	 * bytes on all other targets. The base offset for both is 0x40000.
+	 * Write it to the next available slot
+	 */
+	if (adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))
+		_setprotectreg(device, reg + 1, 0x40000, ilog2(0x20000));
+	else
+		_setprotectreg(device, reg + 1, 0x40000, ilog2(0x10000));
 }
 
 /*
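
Worked out, the two ilog2() calls above give mask lengths of 17 and 16, i.e.
spans of 2^17 (0x20000) and 2^16 (0x10000) register offsets from base
0x40000. After the programming loop, `reg` still holds the last table entry
(A5XX_CP_PROTECT_REG_0 + 16), so reg + 1 is A5XX_CP_PROTECT_REG_17, the slot
the table comments reserve for the SMMU.
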
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index a0b5171..6df5726 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -7,6 +7,7 @@
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <soc/qcom/subsystem_restart.h>
+#include <linux/clk/qcom.h>
 
 #include "adreno.h"
 #include "adreno_a6xx.h"
@@ -15,123 +16,90 @@
 #include "adreno_trace.h"
 #include "kgsl_trace.h"
 
-static struct a6xx_protected_regs {
-	unsigned int base;
-	unsigned int count;
-	int read_protect;
-} a6xx_protected_regs_group[] = {
-	{ 0x600, 0x51, 0 },
-	{ 0xAE50, 0x2, 1 },
-	{ 0x9624, 0x13, 1 },
-	{ 0x8630, 0x8, 1 },
-	{ 0x9E70, 0x1, 1 },
-	{ 0x9E78, 0x187, 1 },
-	{ 0xF000, 0x810, 1 },
-	{ 0xFC00, 0x3, 0 },
-	{ 0x50E, 0x0, 1 },
-	{ 0x50F, 0x0, 0 },
-	{ 0x510, 0x0, 1 },
-	{ 0x0, 0x4F9, 0 },
-	{ 0x501, 0xA, 0 },
-	{ 0x511, 0x44, 0 },
-	{ 0xE00, 0x1, 1 },
-	{ 0xE03, 0xB, 1 },
-	{ 0x8E00, 0x0, 1 },
-	{ 0x8E50, 0xF, 1 },
-	{ 0xBE02, 0x0, 1 },
-	{ 0xBE20, 0x11F3, 1 },
-	{ 0x800, 0x82, 1 },
-	{ 0x8A0, 0x8, 1 },
-	{ 0x8AB, 0x19, 1 },
-	{ 0x900, 0x4D, 1 },
-	{ 0x98D, 0x76, 1 },
-	{ 0x8D0, 0x23, 0 },
-	{ 0x980, 0x4, 0 },
-	{ 0xA630, 0x0, 1 },
-};
-
 /* IFPC & Preemption static powerup restore list */
-static struct reg_list_pair {
-	uint32_t offset;
-	uint32_t val;
-} a6xx_pwrup_reglist[] = {
-	{ A6XX_VSC_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_GRAS_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_RB_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_PC_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_HLSQ_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_VFD_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_VPC_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_UCHE_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_SP_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_TPL1_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_UCHE_WRITE_RANGE_MAX_LO, 0x0 },
-	{ A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0 },
-	{ A6XX_UCHE_TRAP_BASE_LO, 0x0 },
-	{ A6XX_UCHE_TRAP_BASE_HI, 0x0 },
-	{ A6XX_UCHE_WRITE_THRU_BASE_LO, 0x0 },
-	{ A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0 },
-	{ A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x0 },
-	{ A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0 },
-	{ A6XX_UCHE_GMEM_RANGE_MAX_LO, 0x0 },
-	{ A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0 },
-	{ A6XX_UCHE_FILTER_CNTL, 0x0 },
-	{ A6XX_UCHE_CACHE_WAYS, 0x0 },
-	{ A6XX_UCHE_MODE_CNTL, 0x0 },
-	{ A6XX_RB_NC_MODE_CNTL, 0x0 },
-	{ A6XX_TPL1_NC_MODE_CNTL, 0x0 },
-	{ A6XX_SP_NC_MODE_CNTL, 0x0 },
-	{ A6XX_PC_DBG_ECO_CNTL, 0x0 },
-	{ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
+static u32 a6xx_pwrup_reglist[] = {
+	A6XX_VSC_ADDR_MODE_CNTL,
+	A6XX_GRAS_ADDR_MODE_CNTL,
+	A6XX_RB_ADDR_MODE_CNTL,
+	A6XX_PC_ADDR_MODE_CNTL,
+	A6XX_HLSQ_ADDR_MODE_CNTL,
+	A6XX_VFD_ADDR_MODE_CNTL,
+	A6XX_VPC_ADDR_MODE_CNTL,
+	A6XX_UCHE_ADDR_MODE_CNTL,
+	A6XX_SP_ADDR_MODE_CNTL,
+	A6XX_TPL1_ADDR_MODE_CNTL,
+	A6XX_UCHE_WRITE_RANGE_MAX_LO,
+	A6XX_UCHE_WRITE_RANGE_MAX_HI,
+	A6XX_UCHE_TRAP_BASE_LO,
+	A6XX_UCHE_TRAP_BASE_HI,
+	A6XX_UCHE_WRITE_THRU_BASE_LO,
+	A6XX_UCHE_WRITE_THRU_BASE_HI,
+	A6XX_UCHE_GMEM_RANGE_MIN_LO,
+	A6XX_UCHE_GMEM_RANGE_MIN_HI,
+	A6XX_UCHE_GMEM_RANGE_MAX_LO,
+	A6XX_UCHE_GMEM_RANGE_MAX_HI,
+	A6XX_UCHE_FILTER_CNTL,
+	A6XX_UCHE_CACHE_WAYS,
+	A6XX_UCHE_MODE_CNTL,
+	A6XX_RB_NC_MODE_CNTL,
+	A6XX_TPL1_NC_MODE_CNTL,
+	A6XX_SP_NC_MODE_CNTL,
+	A6XX_PC_DBG_ECO_CNTL,
+	A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
 };
 
 /* IFPC only static powerup restore list */
-static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
-	{ A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
-	{ A6XX_CP_CHICKEN_DBG, 0x0 },
-	{ A6XX_CP_DBG_ECO_CNTL, 0x0 },
-	{ A6XX_CP_PROTECT_CNTL, 0x0 },
-	{ A6XX_CP_PROTECT_REG, 0x0 },
-	{ A6XX_CP_PROTECT_REG+1, 0x0 },
-	{ A6XX_CP_PROTECT_REG+2, 0x0 },
-	{ A6XX_CP_PROTECT_REG+3, 0x0 },
-	{ A6XX_CP_PROTECT_REG+4, 0x0 },
-	{ A6XX_CP_PROTECT_REG+5, 0x0 },
-	{ A6XX_CP_PROTECT_REG+6, 0x0 },
-	{ A6XX_CP_PROTECT_REG+7, 0x0 },
-	{ A6XX_CP_PROTECT_REG+8, 0x0 },
-	{ A6XX_CP_PROTECT_REG+9, 0x0 },
-	{ A6XX_CP_PROTECT_REG+10, 0x0 },
-	{ A6XX_CP_PROTECT_REG+11, 0x0 },
-	{ A6XX_CP_PROTECT_REG+12, 0x0 },
-	{ A6XX_CP_PROTECT_REG+13, 0x0 },
-	{ A6XX_CP_PROTECT_REG+14, 0x0 },
-	{ A6XX_CP_PROTECT_REG+15, 0x0 },
-	{ A6XX_CP_PROTECT_REG+16, 0x0 },
-	{ A6XX_CP_PROTECT_REG+17, 0x0 },
-	{ A6XX_CP_PROTECT_REG+18, 0x0 },
-	{ A6XX_CP_PROTECT_REG+19, 0x0 },
-	{ A6XX_CP_PROTECT_REG+20, 0x0 },
-	{ A6XX_CP_PROTECT_REG+21, 0x0 },
-	{ A6XX_CP_PROTECT_REG+22, 0x0 },
-	{ A6XX_CP_PROTECT_REG+23, 0x0 },
-	{ A6XX_CP_PROTECT_REG+24, 0x0 },
-	{ A6XX_CP_PROTECT_REG+25, 0x0 },
-	{ A6XX_CP_PROTECT_REG+26, 0x0 },
-	{ A6XX_CP_PROTECT_REG+27, 0x0 },
-	{ A6XX_CP_PROTECT_REG+28, 0x0 },
-	{ A6XX_CP_PROTECT_REG+29, 0x0 },
-	{ A6XX_CP_PROTECT_REG+30, 0x0 },
-	{ A6XX_CP_PROTECT_REG+31, 0x0 },
-	{ A6XX_CP_AHB_CNTL, 0x0 },
+static u32 a6xx_ifpc_pwrup_reglist[] = {
+	A6XX_RBBM_VBIF_CLIENT_QOS_CNTL,
+	A6XX_CP_CHICKEN_DBG,
+	A6XX_CP_DBG_ECO_CNTL,
+	A6XX_CP_PROTECT_CNTL,
+	A6XX_CP_PROTECT_REG,
+	A6XX_CP_PROTECT_REG+1,
+	A6XX_CP_PROTECT_REG+2,
+	A6XX_CP_PROTECT_REG+3,
+	A6XX_CP_PROTECT_REG+4,
+	A6XX_CP_PROTECT_REG+5,
+	A6XX_CP_PROTECT_REG+6,
+	A6XX_CP_PROTECT_REG+7,
+	A6XX_CP_PROTECT_REG+8,
+	A6XX_CP_PROTECT_REG+9,
+	A6XX_CP_PROTECT_REG+10,
+	A6XX_CP_PROTECT_REG+11,
+	A6XX_CP_PROTECT_REG+12,
+	A6XX_CP_PROTECT_REG+13,
+	A6XX_CP_PROTECT_REG+14,
+	A6XX_CP_PROTECT_REG+15,
+	A6XX_CP_PROTECT_REG+16,
+	A6XX_CP_PROTECT_REG+17,
+	A6XX_CP_PROTECT_REG+18,
+	A6XX_CP_PROTECT_REG+19,
+	A6XX_CP_PROTECT_REG+20,
+	A6XX_CP_PROTECT_REG+21,
+	A6XX_CP_PROTECT_REG+22,
+	A6XX_CP_PROTECT_REG+23,
+	A6XX_CP_PROTECT_REG+24,
+	A6XX_CP_PROTECT_REG+25,
+	A6XX_CP_PROTECT_REG+26,
+	A6XX_CP_PROTECT_REG+27,
+	A6XX_CP_PROTECT_REG+28,
+	A6XX_CP_PROTECT_REG+29,
+	A6XX_CP_PROTECT_REG+30,
+	A6XX_CP_PROTECT_REG+31,
+	A6XX_CP_AHB_CNTL,
 };
 
-static struct reg_list_pair a615_pwrup_reglist[] = {
-	{ A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
+/* a620 and a650 need to program A6XX_CP_PROTECT_REG_47 for the infinite span */
+static u32 a650_pwrup_reglist[] = {
+	A6XX_CP_PROTECT_REG + 47,
 };
 
-static struct reg_list_pair a6xx_ifpc_perfctr_reglist[] = {
-	{ A6XX_RBBM_PERFCTR_CNTL, 0x0 },
+static u32 a615_pwrup_reglist[] = {
+	A6XX_UCHE_GBIF_GX_CONFIG,
+};
+
+static u32 a612_pwrup_reglist[] = {
+	A6XX_RBBM_PERFCTR_CNTL,
 };
 
 static void _update_always_on_regs(struct adreno_device *adreno_dev)
@@ -145,21 +113,6 @@ static void _update_always_on_regs(struct adreno_device *adreno_dev)
 		A6XX_CP_ALWAYS_ON_COUNTER_HI;
 }
 
-static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
-	if (kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
-		PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
-		"powerup_register_list")) {
-		adreno_dev->pwrup_reglist.gpuaddr = 0;
-		return;
-	}
-
-	kgsl_sharedmem_set(device, &adreno_dev->pwrup_reglist, 0, 0,
-		PAGE_SIZE);
-}
-
 static void a6xx_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -185,64 +138,42 @@ static void a6xx_init(struct adreno_device *adreno_dev)
 	 * If the GMU is not enabled, rewrite the offset for the always on
 	 * counters to point to the CP always on instead of GMU always on
 	 */
-	if (!gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
+	if (!gmu_core_isenabled(device))
 		_update_always_on_regs(adreno_dev);
 
-	a6xx_pwrup_reglist_init(adreno_dev);
+	kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
+		PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
+		"powerup_register_list");
 }
 
-/**
- * a6xx_protect_init() - Initializes register protection on a6xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
 static void a6xx_protect_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct kgsl_protected_registers *mmu_prot =
-		kgsl_mmu_get_prot_regs(&device->mmu);
-	int i, num_sets;
-	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
-	int max_sets = adreno_dev->gpucore->num_protected_regs;
-	unsigned int mmu_base = 0, mmu_range = 0, cur_range;
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
+	const struct a6xx_protected_regs *regs = a6xx_core->protected_regs;
+	int i;
 
-	/* enable access protection to privileged registers */
-	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);
+	/*
+	 * Enable access protection to privileged registers, fault on an access
+	 * protect violation and select the last span to protect from the start
+	 * address all the way to the end of the register address space
+	 */
+	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL,
+		(1 << 0) | (1 << 1) | (1 << 3));
 
-	if (mmu_prot) {
-		mmu_base = mmu_prot->base;
-		mmu_range = mmu_prot->range;
-		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
-	}
+	/* Program each register defined by the core definition */
+	for (i = 0; regs[i].reg; i++) {
+		u32 count;
 
-	WARN(req_sets > max_sets,
-		"Size exceeds the num of protection regs available\n");
+		/*
+		 * This is the offset of the end register as counted from the
+		 * start, i.e. # of registers in the range - 1
+		 */
+		count = regs[i].end - regs[i].start;
 
-	/* Protect GPU registers */
-	num_sets = min_t(unsigned int,
-		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
-	for (i = 0; i < num_sets; i++) {
-		struct a6xx_protected_regs *regs =
-					&a6xx_protected_regs_group[i];
-
-		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
-				regs->base | (regs->count << 18) |
-				(regs->read_protect << 31));
-	}
-
-	/* Protect MMU registers */
-	if (mmu_prot) {
-		while ((i < max_sets) && (mmu_range > 0)) {
-			cur_range = min_t(unsigned int, mmu_range,
-						0x2000);
-			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
-				mmu_base | ((cur_range - 1) << 18) | (1 << 31));
-
-			mmu_base += cur_range;
-			mmu_range -= cur_range;
-			i++;
-		}
+		kgsl_regwrite(device, regs[i].reg,
+			regs[i].start | (count << 18) |
+			(regs[i].noaccess << 31));
 	}
 }
 
@@ -269,7 +200,7 @@ __get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
 {
 	if (adreno_is_a630(adreno_dev))
 		return 0x8AA8AA02;
-	else if (adreno_is_a612(adreno_dev))
+	else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev))
 		return 0xAAA8AA82;
 	else
 		return 0x8AA8AA82;
@@ -348,11 +279,12 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 
 	/*
 	 * Disable SP clock before programming HWCG registers.
-	 * A612 GPU is not having the GX power domain. Hence
-	 * skip GMU_GX registers for A12.
+	 * A612 and A610 GPUs do not have the GX power domain.
+	 * Hence skip the GMU_GX registers for A612 and A610.
 	 */
 
-	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev))
+	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev) &&
+		!adreno_is_a610(adreno_dev))
 		gmu_core_regrmw(device,
 			A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
 
@@ -362,10 +294,11 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 
 	/*
 	 * Enable SP clock after programming HWCG registers.
-	 * A612 GPU is not having the GX power domain. Hence
-	 * skip GMU_GX registers for A612.
+	 * A612 and A610 GPUs do not have the GX power domain.
+	 * Hence skip the GMU_GX registers for A612 and A610.
 	 */
-	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev))
+	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev) &&
+		!adreno_is_a610(adreno_dev))
 		gmu_core_regrmw(device,
 			A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
 
@@ -374,80 +307,63 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 		on ? __get_rbbm_clock_cntl_on(adreno_dev) : 0);
 }
 
+struct a6xx_reglist_list {
+	u32 *regs;
+	u32 count;
+};
+
+#define REGLIST(_a) \
+	 (struct a6xx_reglist_list) { .regs = _a, .count = ARRAY_SIZE(_a), }
+
 static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
 {
-	uint32_t i;
-	struct cpu_gpu_lock *lock;
-	struct reg_list_pair *r;
+	struct a6xx_reglist_list reglist[3];
+	void *ptr = adreno_dev->pwrup_reglist.hostptr;
+	struct cpu_gpu_lock *lock = ptr;
+	int items = 0, i, j;
+	u32 *dest = ptr + sizeof(*lock);
 
-	/* Set up the register values */
-	for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
-		r = &a6xx_ifpc_pwrup_reglist[i];
-		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
+	/* Static IFPC-only registers */
+	reglist[items++] = REGLIST(a6xx_ifpc_pwrup_reglist);
+
+	/* Static IFPC + preemption registers */
+	reglist[items++] = REGLIST(a6xx_pwrup_reglist);
+
+	/* Add target specific registers */
+	if (adreno_is_a612(adreno_dev))
+		reglist[items++] = REGLIST(a612_pwrup_reglist);
+	else if (adreno_is_a615_family(adreno_dev))
+		reglist[items++] = REGLIST(a615_pwrup_reglist);
+	else if (adreno_is_a650(adreno_dev) || adreno_is_a620(adreno_dev))
+		reglist[items++] = REGLIST(a650_pwrup_reglist);
+
+	/*
+	 * For each entry in each of the lists, write the offset and the current
+	 * register value into the GPU buffer
+	 */
+	for (i = 0; i < items; i++) {
+		u32 *r = reglist[i].regs;
+
+		for (j = 0; j < reglist[i].count; j++) {
+			*dest++ = r[j];
+			kgsl_regread(KGSL_DEVICE(adreno_dev), r[j], dest++);
+		}
+
+		lock->list_length += reglist[i].count * 2;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
-		r = &a6xx_pwrup_reglist[i];
-		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
-	}
-
-	lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
-	lock->flag_ucode = 0;
-	lock->flag_kmd = 0;
-	lock->turn = 0;
-
 	/*
 	 * The overall register list is composed of
 	 * 1. Static IFPC-only registers
 	 * 2. Static IFPC + preemption registers
-	 * 2. Dynamic IFPC + preemption registers (ex: perfcounter selects)
+	 * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
 	 *
 	 * The CP views the second and third entries as one dynamic list
-	 * starting from list_offset. Thus, list_length should be the sum
-	 * of all three lists above (of which the third list will start off
-	 * empty). And list_offset should be specified as the size in dwords
-	 * of the static IFPC-only register list.
+	 * starting from list_offset. list_length should be the total number of
+	 * dwords in all the lists, and list_offset should be the size in dwords
+	 * of the first list (the static IFPC-only registers).
 	 */
-	lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
-			sizeof(a6xx_pwrup_reglist)) >> 2;
-	lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;
-
-	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
-		a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
-	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
-		+ sizeof(a6xx_ifpc_pwrup_reglist), a6xx_pwrup_reglist,
-		sizeof(a6xx_pwrup_reglist));
-
-	if (adreno_is_a615_family(adreno_dev)) {
-		for (i = 0; i < ARRAY_SIZE(a615_pwrup_reglist); i++) {
-			r = &a615_pwrup_reglist[i];
-			kgsl_regread(KGSL_DEVICE(adreno_dev),
-				r->offset, &r->val);
-		}
-
-		memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
-			+ sizeof(a6xx_ifpc_pwrup_reglist)
-			+ sizeof(a6xx_pwrup_reglist), a615_pwrup_reglist,
-			sizeof(a615_pwrup_reglist));
-
-		lock->list_length += sizeof(a615_pwrup_reglist) >> 2;
-	}
-
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_PERFCTRL_RETAIN)) {
-		for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_perfctr_reglist); i++) {
-			r = &a6xx_ifpc_perfctr_reglist[i];
-			kgsl_regread(KGSL_DEVICE(adreno_dev),
-				r->offset, &r->val);
-		}
-
-		memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
-				+ sizeof(a6xx_ifpc_pwrup_reglist)
-				+ sizeof(a6xx_pwrup_reglist),
-				a6xx_ifpc_perfctr_reglist,
-				sizeof(a6xx_ifpc_perfctr_reglist));
-
-		lock->list_length += sizeof(a6xx_ifpc_perfctr_reglist) >> 2;
-	}
+	lock->list_offset = reglist[0].count * 2;
 }
 
 /*
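
Worked numbers for the common case with no target-specific list, counting the
arrays above (and assuming list_length starts at zero in the freshly
allocated buffer): a6xx_ifpc_pwrup_reglist has 37 registers and
a6xx_pwrup_reglist has 28, each stored as an (offset, value) pair, so:

	lock->list_offset = 37 * 2;		/* 74 dwords */
	lock->list_length = (37 + 28) * 2;	/* 130 dwords */
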
@@ -516,7 +432,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	if (ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640) {
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
-	} else if (adreno_is_a612(adreno_dev)) {
+	} else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev)) {
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
 	} else {
@@ -524,8 +440,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
 	}
 
-	if (adreno_is_a612(adreno_dev)) {
-		/* For A612 Mem pool size is reduced to 48 */
+	if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev)) {
+		/* For A612 and A610 the mem pool size is reduced to 48 */
 		kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 48);
 		kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 47);
 	} else {
@@ -604,9 +520,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (mal << 23) |
 		(lower_bit << 21));
 
-	/* Set hang detection threshold to 0x3FFFFF * 16 cycles */
 	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
-					(1 << 30) | 0x3fffff);
+				(1 << 30) | a6xx_core->hang_detect_cycles);
 
 	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
 
@@ -628,7 +543,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
 
 	/* Set the bit vccCacheSkipDis=1 to get rid of TSEskip logic */
-	kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));
+	if (a6xx_core->disable_tseskip)
+		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));
 
 	/* Enable the GMEM save/restore feature for preemption */
 	if (adreno_is_preemption_enabled(adreno_dev))
@@ -941,9 +857,6 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev)
  */
 static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
 {
-	if (adreno_is_a612(adreno_dev))
-		return 0;
-
 	return a6xx_gmu_sptprac_enable(adreno_dev);
 }
 
@@ -953,9 +866,6 @@ static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
  */
 static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
 {
-	if (adreno_is_a612(adreno_dev))
-		return;
-
 	a6xx_gmu_sptprac_disable(adreno_dev);
 }
 
@@ -1112,7 +1022,8 @@ static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
 	u32 a, b, c;
 	struct adreno_busy_data *busy = &adreno_dev->busy_data;
 
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
+			!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
 		return 0;
 
 	/* The counters are selected in a6xx_gmu_enable_lm() */
@@ -1129,14 +1040,26 @@ static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
 	/*
 	 * The adjustment is the number of cycles lost to throttling, which
 	 * is calculated as a weighted average of the cycles throttled
-	 * at 15%, 50%, and 90%. The adjustment is negative because in A6XX,
+	 * at different levels. The adjustment is negative because in A6XX,
 	 * the busy count includes the throttled cycles. Therefore, we want
 	 * to remove them to prevent appearing to be busier than
 	 * we actually are.
 	 */
-	adj *= ((a * 15) + (b * 50) + (c * 90)) / 100;
+	if (adreno_is_a620(adreno_dev) || adreno_is_a650(adreno_dev))
+		/*
+		 * With the newer generations, CRC throttle from SIDs of 0x14
+		 * and above cannot be observed in power counters. Since 90%
+		 * throttle uses SID 0x16 the adjustment calculation needs
+		 * correction. The throttling is in increments of 4.2%, and the
+		 * 91.7% counter does a weighted count by the value of the SID
+		 * used, which the final formula takes into account.
+		 */
+		adj *= ((a * 42) + (b * 500) +
+			((((int64_t)c - a - b * 12) / 22) * 917)) / 1000;
+	else
+		adj *= ((a * 15) + (b * 50) + (c * 90)) / 100;
 
-	trace_kgsl_clock_throttling(0, a, b, c, adj);
+	trace_kgsl_clock_throttling(0, b, c, a, adj);
 
 	return adj;
 }
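
To make the weighting concrete, here is a standalone sketch that evaluates both adjustment formulas with made-up counter deltas (a, b, c stand for the XOCLK1/2/3 deltas; all values are illustrative only):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical XOCLK deltas for a legacy part (15/50/90 counters) */
		int64_t a = 1000, b = 200, c = 50;
		int64_t legacy = ((a * 15) + (b * 50) + (c * 90)) / 100;

		/*
		 * On A620/A650 the third counter is a weighted 91.7% count, so
		 * it runs much larger and must be corrected for the lower-SID
		 * contributions before weighting.
		 */
		int64_t a2 = 100, b2 = 50, c2 = 2900;
		int64_t newer = ((a2 * 42) + (b2 * 500) +
				(((c2 - a2 - b2 * 12) / 22) * 917)) / 1000;

		printf("legacy throttled cycles: %lld\n", (long long)legacy);
		printf("a620/a650 throttled cycles: %lld\n", (long long)newer);
		return 0;
	}
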
@@ -1345,47 +1268,66 @@ static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
 			A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0, 0x3);
 }
 
-static const char *fault_block[8] = {
-	[0] = "CP",
-	[1] = "UCHE",
-	[2] = "VFD",
-	[3] = "UCHE",
-	[4] = "CCU",
-	[5] = "unknown",
-	[6] = "CDP Prefetch",
-	[7] = "GPMU",
+static const char *uche_client[7][3] = {
+	{"SP | VSC | VPC | HLSQ | PC | LRZ", "TP", "VFD"},
+	{"VSC | VPC | HLSQ | PC | LRZ", "TP | VFD", "SP"},
+	{"SP | VPC | HLSQ | PC | LRZ", "TP | VFD", "VSC"},
+	{"SP | VSC | HLSQ | PC | LRZ", "TP | VFD", "VPC"},
+	{"SP | VSC | VPC | PC | LRZ", "TP | VFD", "HLSQ"},
+	{"SP | VSC | VPC | HLSQ | LRZ", "TP | VFD", "PC"},
+	{"SP | VSC | VPC | HLSQ | PC", "TP | VFD", "LRZ"},
 };
 
-static const char *uche_client[8] = {
-	[0] = "VFD",
-	[1] = "SP",
-	[2] = "VSC",
-	[3] = "VPC",
-	[4] = "HLSQ",
-	[5] = "PC",
-	[6] = "LRZ",
-	[7] = "unknown",
-};
+#define SCOOBYDOO 0x5c00bd00
 
-static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
-						unsigned int fsynr1)
+static const char *a6xx_fault_block_uche(struct kgsl_device *device,
+		unsigned int mid)
 {
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	unsigned int client_id;
-	unsigned int uche_client_id;
-
-	client_id = fsynr1 & 0xff;
-
-	if (client_id >= ARRAY_SIZE(fault_block))
-		return "unknown";
-	else if (client_id != 3)
-		return fault_block[client_id];
+	unsigned int uche_client_id = 0;
+	static char str[40];
 
 	mutex_lock(&device->mutex);
+
+	if (!kgsl_state_is_awake(device)) {
+		mutex_unlock(&device->mutex);
+		return "UCHE: unknown";
+	}
+
 	kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
 	mutex_unlock(&device->mutex);
 
-	return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
+	/* Ignore the value if the gpu is in IFPC */
+	if (uche_client_id == SCOOBYDOO)
+		return "UCHE: unknown";
+
+	uche_client_id &= A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK;
+	snprintf(str, sizeof(str), "UCHE: %s",
+			uche_client[uche_client_id][mid - 1]);
+
+	return str;
+}
+
+static const char *a6xx_iommu_fault_block(struct kgsl_device *device,
+		unsigned int fsynr1)
+{
+	unsigned int mid = fsynr1 & 0xff;
+
+	switch (mid) {
+	case 0:
+		return "CP";
+	case 1:
+	case 2:
+	case 3:
+		return a6xx_fault_block_uche(device, mid);
+	case 4:
+		return "CCU";
+	case 6:
+		return "CDP Prefetch";
+	case 7:
+		return "GPMU";
+	}
+
+	return "Unknown";
 }
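
As a usage illustration, the decode path takes the master ID from FSYNR1's low byte and, for MIDs 1-3, refines it through the UCHE client table. A hedged userspace model (the FSYNR1 value is hypothetical, and only one table row is carried over for brevity):

	#include <stdio.h>

	/* One row of the driver's 7x3 uche_client table, for illustration */
	static const char *uche_client_row0[3] = {
		"SP | VSC | VPC | HLSQ | PC | LRZ", "TP", "VFD"
	};

	static const char *fault_block(unsigned int fsynr1)
	{
		unsigned int mid = fsynr1 & 0xff;	/* master ID in the low byte */

		switch (mid) {
		case 0:
			return "CP";
		case 1:
		case 2:
		case 3:
			/* The real driver also selects a row by UCHE client id */
			return uche_client_row0[mid - 1];
		case 4:
			return "CCU";
		case 6:
			return "CDP Prefetch";
		case 7:
			return "GPMU";
		}

		return "Unknown";
	}

	int main(void)
	{
		printf("%s\n", fault_block(0x02));	/* hypothetical FSYNR1 */
		return 0;
	}
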
 
 static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit)
@@ -2504,21 +2446,20 @@ static const struct adreno_reg_offsets a6xx_reg_offsets = {
 	.offset_0 = ADRENO_REG_REGISTER_MAX,
 };
 
-static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
-	struct adreno_perfcount_register *reg, bool update_reg)
+static int cpu_gpu_lock(struct cpu_gpu_lock *lock)
 {
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct cpu_gpu_lock *lock = adreno_dev->pwrup_reglist.hostptr;
-	struct reg_list_pair *reg_pair = (struct reg_list_pair *)(lock + 1);
-	unsigned int i;
 	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-	int ret = 0;
 
+	/* Indicate that the CPU wants the lock */
 	lock->flag_kmd = 1;
-	/* Write flag_kmd before turn */
+
+	/* Post the request */
 	wmb();
+
+	/* Wait for our turn */
 	lock->turn = 0;
-	/* Write these fields before looping */
+
+	/* Finish all memory transactions before moving on */
 	mb();
 
 	/*
@@ -2530,60 +2471,99 @@ static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
 		cpu_relax();
 		/* Get the latest updates from GPU */
 		rmb();
-		/*
-		 * Make sure we wait at least 1sec for the lock,
-		 * if we did not get it after 1sec return an error.
-		 */
-		if (time_after(jiffies, timeout) &&
-			(lock->flag_ucode == 1 && lock->turn == 0)) {
-			ret = -EBUSY;
-			goto unlock;
-		}
+
+		if (time_after(jiffies, timeout))
+			break;
 	}
 
-	/* Read flag_ucode and turn before list_length */
-	rmb();
+	if (lock->flag_ucode == 1 && lock->turn == 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static void cpu_gpu_unlock(struct cpu_gpu_lock *lock)
+{
+	/* Make sure all writes are done before releasing the lock */
+	wmb();
+	lock->flag_kmd = 0;
+}
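
The flag/turn pair above is Peterson's algorithm with the GPU microcode as the second participant. A compilable userspace model of both sides follows, using C11 atomics where the driver uses wmb()/mb()/rmb(); the ucode side shown here is an assumption for illustration, since the real peer lives in CP firmware:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int flag_kmd, flag_ucode, turn;
	static int shared;	/* stands in for the shared register list */

	static void kmd_lock(void)
	{
		atomic_store(&flag_kmd, 1);	/* CPU wants the lock */
		atomic_store(&turn, 0);		/* give way to the ucode */
		atomic_thread_fence(memory_order_seq_cst);
		while (atomic_load(&flag_ucode) && atomic_load(&turn) == 0)
			;			/* spin, like the driver */
	}

	static void kmd_unlock(void)
	{
		atomic_store(&flag_kmd, 0);
	}

	static void *ucode_side(void *arg)	/* mirror image of the CPU side */
	{
		atomic_store(&flag_ucode, 1);
		atomic_store(&turn, 1);
		atomic_thread_fence(memory_order_seq_cst);
		while (atomic_load(&flag_kmd) && atomic_load(&turn) == 1)
			;
		shared++;			/* critical section */
		atomic_store(&flag_ucode, 0);
		return arg;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, ucode_side, NULL);
		kmd_lock();
		shared++;			/* critical section */
		kmd_unlock();
		pthread_join(t, NULL);
		printf("shared = %d\n", shared);	/* always 2 */
		return 0;
	}
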
+
+static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
+	struct adreno_perfcount_register *reg, bool update_reg)
+{
+	void *ptr = adreno_dev->pwrup_reglist.hostptr;
+	struct cpu_gpu_lock *lock = ptr;
+	u32 *data = ptr + sizeof(*lock);
+	int i, offset = 0;
+
+	if (cpu_gpu_lock(lock)) {
+		cpu_gpu_unlock(lock);
+		return -EBUSY;
+	}
+
 	/*
 	 * If the perfcounter select register is already present in reglist
 	 * update it, otherwise append the <select register, value> pair to
 	 * the end of the list.
 	 */
-	for (i = 0; i < lock->list_length >> 1; i++)
-		if (reg_pair[i].offset == reg->select)
-			break;
-	/*
-	 * If the perfcounter selct register is not present overwrite last entry
-	 * with new entry and add RBBM perf counter enable at the end.
-	 */
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_PERFCTRL_RETAIN) &&
-			(i == lock->list_length >> 1)) {
-		reg_pair[i-1].offset = reg->select;
-		reg_pair[i-1].val = reg->countable;
+	for (i = 0; i < lock->list_length >> 1; i++) {
+		if (data[offset] == reg->select) {
+			data[offset + 1] = reg->countable;
+			goto update;
+		}
 
-		/* Enable perf counter after performance counter selections */
-		reg_pair[i].offset = A6XX_RBBM_PERFCTR_CNTL;
-		reg_pair[i].val = 1;
-
-	} else {
-		/*
-		 * If perf counter select register is already present in reglist
-		 * just update list without adding the RBBM perfcontrol enable.
-		 */
-		reg_pair[i].offset = reg->select;
-		reg_pair[i].val = reg->countable;
+		offset += 2;
 	}
 
-	if (i == lock->list_length >> 1)
-		lock->list_length += 2;
+	/*
+	 * For A612 targets A6XX_RBBM_PERFCTR_CNTL needs to be the last entry,
+	 * so overwrite the existing A6XX_RBBM_PERFCTR_CNTL and add it back to
+	 * the end. All other targets just append the new counter to the end.
+	 */
+	if (adreno_is_a612(adreno_dev)) {
+		data[offset - 2] = reg->select;
+		data[offset - 1] = reg->countable;
 
+		data[offset] = A6XX_RBBM_PERFCTR_CNTL;
+		data[offset + 1] = 1;
+	} else {
+		data[offset] = reg->select;
+		data[offset + 1] = reg->countable;
+	}
+
+	lock->list_length += 2;
+
+update:
 	if (update_reg)
-		kgsl_regwrite(device, reg->select, reg->countable);
+		kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg->select,
+			reg->countable);
 
-unlock:
-	/* All writes done before releasing the lock */
-	wmb();
-	lock->flag_kmd = 0;
-	return ret;
+	cpu_gpu_unlock(lock);
+	return 0;
+}
+
+static void a6xx_clk_set_options(struct adreno_device *adreno_dev,
+	const char *name, struct clk *clk, bool on)
+{
+	if (!adreno_is_a610(adreno_dev))
+		return;
+
+	/* Handle clock settings for GFX PSCBCs */
+	if (on) {
+		if (!strcmp(name, "mem_iface_clk")) {
+			clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+			clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+		} else if (!strcmp(name, "core_clk")) {
+			clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
+			clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
+		}
+	} else {
+		if (!strcmp(name, "core_clk")) {
+			clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+			clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+		}
+	}
 }
 
 struct adreno_gpudev adreno_a6xx_gpudev = {
@@ -2622,4 +2602,5 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.ccu_invalidate = a6xx_ccu_invalidate,
 	.perfcounter_update = a6xx_perfcounter_update,
 	.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
+	.clk_set_options = a6xx_clk_set_options,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index ae6839a..ed45f1a 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -11,6 +11,26 @@
 #include "a6xx_reg.h"
 
 /**
+ * struct a6xx_protected_regs - container for a protect register span
+ */
+struct a6xx_protected_regs {
+	/** @reg: Physical protected mode register to write to */
+	u32 reg;
+	/** @start: Dword offset of the starting register in the range */
+	u32 start;
+	/**
+	 * @end: Dword offset of the ending register in the range
+	 * (inclusive)
+	 */
+	u32 end;
+	/**
+	 * @noaccess: 1 if the register should not be accessible from
+	 * userspace, 0 if it can be read (but not written)
+	 */
+	u32 noaccess;
+};
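
An illustrative per-target table using this struct might look like the fragment below; the register numbers and ranges are placeholders for this sketch, not real A6XX values:

	static const struct a6xx_protected_regs example_protected_regs[] = {
		/* reg,   start,  end,    noaccess */
		{  0x8a0, 0x0000, 0x00ff, 1 },	/* block reads and writes */
		{  0x8a1, 0x0500, 0x05ff, 0 },	/* readable, not writable */
		{  0 },				/* sentinel */
	};
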
+
+/**
  * struct adreno_a6xx_core - a6xx specific GPU core definitions
  */
 struct adreno_a6xx_core {
@@ -42,6 +62,12 @@ struct adreno_a6xx_core {
 	bool veto_fal10;
 	/** @pdc_in_aop: True if PDC programmed in AOP */
 	bool pdc_in_aop;
+	/** @hang_detect_cycles: Hang detect counter timeout value */
+	u32 hang_detect_cycles;
+	/** @protected_regs: Array of protected registers for the target */
+	const struct a6xx_protected_regs *protected_regs;
+	/** @disable_tseskip: True if TSESkip logic is disabled */
+	bool disable_tseskip;
 };
 
 #define CP_CLUSTER_FE		0x0
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 8cd4808..184fd73 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -228,7 +228,7 @@ static int a6xx_load_pdc_ucode(struct kgsl_device *device)
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
 
-	if (adreno_is_a618(adreno_dev) || adreno_is_a650(adreno_dev))
+	if (adreno_is_a618(adreno_dev) || adreno_is_a650_family(adreno_dev))
 		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x2);
 	else
 		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x3);
@@ -1167,38 +1167,8 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
 	return gmu_cache_finalize(device);
 }
 
-#define A6XX_STATE_OF_CHILD             (BIT(4) | BIT(5))
-#define A6XX_IDLE_FULL_LLM              BIT(0)
-#define A6XX_WAKEUP_ACK                 BIT(1)
-#define A6XX_IDLE_FULL_ACK              BIT(0)
 #define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 
-static int a6xx_llm_glm_handshake(struct kgsl_device *device)
-{
-	unsigned int val;
-	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
-			!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
-		return 0;
-
-	gmu_core_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
-	if (!(val & A6XX_STATE_OF_CHILD)) {
-		gmu_core_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
-		gmu_core_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
-				A6XX_IDLE_FULL_LLM);
-		if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
-				A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
-				A6XX_IDLE_FULL_ACK)) {
-			dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 static void a6xx_isense_disable(struct kgsl_device *device)
 {
 	unsigned int val;
@@ -1224,9 +1194,6 @@ static int a6xx_gmu_suspend(struct kgsl_device *device)
 	/* Disable ISENSE if it's on */
 	a6xx_isense_disable(device);
 
-	/* LLM-GLM handshake sequence */
-	a6xx_llm_glm_handshake(device);
-
 	/* If SPTP_RAC is on, turn off SPTP_RAC HS */
 	a6xx_gmu_sptprac_disable(ADRENO_DEVICE(device));
 
@@ -1384,14 +1351,19 @@ void a6xx_gmu_enable_lm(struct kgsl_device *device)
 
 	/*
 	 * For throttling, use the following counters for throttled cycles:
-	 * XOCLK1: countable 0x10
-	 * XOCLK2: countable 0x15
-	 * XOCLK3: countable 0x19
+	 * XOCLK1: countable: 0x10
+	 * XOCLK2: countable: 0x16 for newer hardware / 0x15 for others
+	 * XOCLK3: countable: 0xf for newer hardware / 0x19 for others
 	 *
 	 * POWER_CONTROL_SELECT_0 controls counters 0 - 3, each selector
 	 * is 8 bits wide.
 	 */
-	val = (0x10 << 8) | (0x15 << 16) | (0x19 << 24);
+
+	if (adreno_is_a620(adreno_dev) || adreno_is_a650(adreno_dev))
+		val = (0x10 << 8) | (0x16 << 16) | (0x0f << 24);
+	else
+		val = (0x10 << 8) | (0x15 << 16) | (0x19 << 24);
 
 	/* Make sure not to write over XOCLK0 */
 	gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
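
The selector register packs one 8-bit countable per power counter. A quick standalone check of the packing used above, with the a620/a650 values from this hunk:

	#include <stdio.h>

	int main(void)
	{
		/* POWER_COUNTER_SELECT_0 holds one 8-bit countable per counter */
		unsigned int val = (0x10 << 8) | (0x16 << 16) | (0x0f << 24);
		int i;

		for (i = 1; i <= 3; i++)	/* XOCLK0 (bits 7:0) is left alone */
			printf("XOCLK%d countable = %#x\n", i,
			       (val >> (8 * i)) & 0xff);
		return 0;
	}
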
@@ -1645,6 +1617,8 @@ static void a6xx_gmu_snapshot(struct kgsl_device *device,
 	if (a6xx_gmu_gx_is_on(device)) {
 		/* Set fence to ALLOW mode so registers can be read */
 		kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+		/* Make sure the previous write posted before reading */
+		wmb();
 		kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
 
 		dev_err(device->dev, "set FENCE to ALLOW mode:%x\n", val);
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index dcd01b2..fbeb4b5 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -379,6 +379,13 @@ static const unsigned int a6xx_pre_crashdumper_registers[] = {
 	0x825, 0x825,
 };
 
+static const unsigned int a6xx_gmu_wrapper_registers[] = {
+	/* GMU CX */
+	0x1f840, 0x1f840, 0x1f844, 0x1f845, 0x1f887, 0x1f889,
+	/* GMU AO */
+	0x23b0c, 0x23b0e, 0x23b15, 0x23b15,
+};
+
 enum a6xx_debugbus_id {
 	A6XX_DBGBUS_CP           = 0x1,
 	A6XX_DBGBUS_RBBM         = 0x2,
@@ -1763,6 +1770,10 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
 		adreno_snapshot_registers(device, snapshot,
 			a6xx_rscc_snapshot_registers,
 			ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
+	} else if (adreno_is_a610(adreno_dev)) {
+		adreno_snapshot_registers(device, snapshot,
+			a6xx_gmu_wrapper_registers,
+			ARRAY_SIZE(a6xx_gmu_wrapper_registers) / 2);
 	}
 
 	sptprac_on = gpudev->sptprac_is_on(adreno_dev);
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 8289f52..8d2a97e 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -738,7 +738,7 @@ static void setup_fault_process(struct kgsl_device *device,
 	if (kgsl_mmu_is_perprocess(&device->mmu)) {
 		struct kgsl_process_private *tmp;
 
-		mutex_lock(&kgsl_driver.process_mutex);
+		spin_lock(&kgsl_driver.proclist_lock);
 		list_for_each_entry(tmp, &kgsl_driver.process_list, list) {
 			u64 pt_ttbr0;
 
@@ -749,7 +749,7 @@ static void setup_fault_process(struct kgsl_device *device,
 				break;
 			}
 		}
-		mutex_unlock(&kgsl_driver.process_mutex);
+		spin_unlock(&kgsl_driver.proclist_lock);
 	}
 done:
 	snapshot->process = process;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index df4d30f..2257294 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -921,7 +921,7 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
 {
 	struct kgsl_process_private *p, *private = NULL;
 
-	mutex_lock(&kgsl_driver.process_mutex);
+	spin_lock(&kgsl_driver.proclist_lock);
 	list_for_each_entry(p, &kgsl_driver.process_list, list) {
 		if (p->pid == pid) {
 			if (kgsl_process_private_get(p))
@@ -929,7 +929,8 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
 			break;
 		}
 	}
-	mutex_unlock(&kgsl_driver.process_mutex);
+	spin_unlock(&kgsl_driver.proclist_lock);
+
 	return private;
 }
 
@@ -1035,7 +1036,9 @@ static void kgsl_process_private_close(struct kgsl_device_private *dev_priv,
 		kgsl_mmu_detach_pagetable(private->pagetable);
 
 	/* Remove the process struct from the master list */
+	spin_lock(&kgsl_driver.proclist_lock);
 	list_del(&private->list);
+	spin_unlock(&kgsl_driver.proclist_lock);
 
 	/*
 	 * Unlock the mutex before releasing the memory and the debugfs
@@ -1071,7 +1074,9 @@ static struct kgsl_process_private *kgsl_process_private_open(
 		kgsl_process_init_sysfs(device, private);
 		kgsl_process_init_debugfs(private);
 
+		spin_lock(&kgsl_driver.proclist_lock);
 		list_add(&private->list, &kgsl_driver.process_list);
+		spin_unlock(&kgsl_driver.proclist_lock);
 	}
 
 done:
@@ -4870,6 +4875,7 @@ static const struct file_operations kgsl_fops = {
 
 struct kgsl_driver kgsl_driver  = {
 	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
+	.proclist_lock = __SPIN_LOCK_UNLOCKED(kgsl_driver.proclist_lock),
 	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
 	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
 	/*
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index d824177..347a30c 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -113,6 +113,7 @@ struct kgsl_context;
  * @pagetable_list: LIst of open pagetables
  * @ptlock: Lock for accessing the pagetable list
  * @process_mutex: Mutex for accessing the process list
+ * @proclist_lock: Lock for accessing the process list
  * @devlock: Mutex protecting the device list
  * @stats: Struct containing atomic memory statistics
  * @full_cache_threshold: the threshold that triggers a full cache flush
@@ -131,6 +132,7 @@ struct kgsl_driver {
 	struct list_head pagetable_list;
 	spinlock_t ptlock;
 	struct mutex process_mutex;
+	spinlock_t proclist_lock;
 	struct mutex devlock;
 	struct {
 		atomic_long_t vmalloc;
@@ -331,16 +333,6 @@ struct kgsl_event_group {
 };
 
 /**
- * struct kgsl_protected_registers - Protected register range
- * @base: Offset of the range to be protected
- * @range: Range (# of registers = 2 ** range)
- */
-struct kgsl_protected_registers {
-	unsigned int base;
-	int range;
-};
-
-/**
  * struct sparse_bind_object - Bind metadata
  * @node: Node for the rb tree
  * @p_memdesc: Physical memdesc bound to
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 8a8129b..bc6ff13 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -383,6 +383,9 @@ static void gmu_memory_close(struct gmu_device *gmu)
 		md = &gmu->kmem_entries[i];
 		ctx = &gmu_ctx[md->ctx_idx];
 
+		if (!ctx->domain)
+			continue;
+
 		if (md->gmuaddr && md->mem_type != GMU_ITCM &&
 				md->mem_type != GMU_DTCM)
 			iommu_unmap(ctx->domain, md->gmuaddr, md->size);
@@ -804,38 +807,6 @@ static void build_bwtable_cmd_cache(struct gmu_device *gmu)
 				votes->cnoc_votes.cmd_data[i][j];
 }
 
-static int gmu_acd_probe(struct gmu_device *gmu, struct device_node *node)
-{
-	struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_tbl_cmd;
-	struct device_node *acd_node;
-
-	acd_node = of_find_node_by_name(node, "qcom,gpu-acd-table");
-	if (!acd_node)
-		return -ENODEV;
-
-	cmd->hdr = 0xFFFFFFFF;
-	cmd->version = HFI_ACD_INIT_VERSION;
-	cmd->enable_by_level = 0;
-	cmd->stride = 0;
-	cmd->num_levels = 0;
-
-	of_property_read_u32(acd_node, "qcom,acd-stride", &cmd->stride);
-	if (!cmd->stride || cmd->stride > MAX_ACD_STRIDE)
-		return -EINVAL;
-
-	of_property_read_u32(acd_node, "qcom,acd-num-levels", &cmd->num_levels);
-	if (!cmd->num_levels || cmd->num_levels > MAX_ACD_NUM_LEVELS)
-		return -EINVAL;
-
-	of_property_read_u32(acd_node, "qcom,acd-enable-by-level",
-			&cmd->enable_by_level);
-	if (hweight32(cmd->enable_by_level) != cmd->num_levels)
-		return -EINVAL;
-
-	return of_property_read_u32_array(acd_node, "qcom,acd-data",
-			cmd->data, cmd->stride * cmd->num_levels);
-}
-
 /*
  * gmu_bus_vote_init - initialized RPMh votes needed for bw scaling by GMU.
  * @gmu: Pointer to GMU device
@@ -1278,6 +1249,41 @@ int gmu_cache_finalize(struct kgsl_device *device)
 	return 0;
 }
 
+static void gmu_acd_probe(struct kgsl_device *device, struct gmu_device *gmu,
+		struct device_node *node)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_tbl_cmd;
+	u32 acd_level, cmd_idx, numlvl = pwr->num_pwrlevels;
+	int ret, i;
+
+	if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_ACD))
+		return;
+
+	cmd->hdr = 0xFFFFFFFF;
+	cmd->version = HFI_ACD_INIT_VERSION;
+	cmd->stride = 1;
+	cmd->enable_by_level = 0;
+
+	for (i = 0, cmd_idx = 0; i < numlvl; i++) {
+		acd_level = pwr->pwrlevels[numlvl - i - 1].acd_level;
+		if (acd_level) {
+			cmd->enable_by_level |= (1 << i);
+			cmd->data[cmd_idx++] = acd_level;
+		}
+	}
+
+	if (!cmd->enable_by_level)
+		return;
+
+	cmd->num_levels = cmd_idx;
+
+	ret = gmu_aop_mailbox_init(device, gmu);
+	if (ret)
+		dev_err(&gmu->pdev->dev,
+			"AOP mailbox init failed: %d\n", ret);
+}
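
A worked example of the loop above, with a hypothetical five-level frequency table where only the two slowest levels carry an ACD vote (levels are scanned from slowest to fastest, so bit i corresponds to pwrlevel numlvl - i - 1):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical acd_level per pwrlevel, fastest (0) .. slowest (4) */
		unsigned int acd_level[5] = { 0, 0, 0, 0xa02c, 0xa02c };
		unsigned int enable_by_level = 0, data[5], cmd_idx = 0;
		unsigned int numlvl = 5, i;

		for (i = 0; i < numlvl; i++) {
			unsigned int lvl = acd_level[numlvl - i - 1];

			if (lvl) {
				enable_by_level |= (1 << i);
				data[cmd_idx++] = lvl;
			}
		}

		/* Slowest two levels -> bits 0 and 1 -> mask 0x3, two data words */
		printf("enable_by_level = %#x, num_levels = %u\n",
		       enable_by_level, cmd_idx);
		return 0;
	}
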
+
 /* Do not access any GMU registers in GMU probe function */
 static int gmu_probe(struct kgsl_device *device, struct device_node *node)
 {
@@ -1390,17 +1396,7 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
 	else
 		gmu->idle_level = GPU_HW_ACTIVE;
 
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_ACD)) {
-		if (!gmu_acd_probe(gmu, node)) {
-			/* Init the AOP mailbox if we have a valid ACD table */
-			ret = gmu_aop_mailbox_init(device, gmu);
-			if (ret)
-				dev_err(&gmu->pdev->dev,
-					"AOP mailbox init failed: %d\n", ret);
-		} else
-			dev_err(&gmu->pdev->dev,
-				"ACD probe failed: missing or invalid table\n");
-	}
+	gmu_acd_probe(device, gmu, node);
 
 	set_bit(GMU_ENABLED, &device->gmu_core.flags);
 	device->gmu_core.dev_ops = &adreno_a6xx_gmudev;
diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c
index 60669a5..8af7840 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.c
+++ b/drivers/gpu/msm/kgsl_gmu_core.c
@@ -360,5 +360,5 @@ int gmu_core_dev_wait_for_active_transition(struct kgsl_device *device)
 	if (ops && ops->wait_for_active_transition)
 		return ops->wait_for_active_transition(device);
 
-	return -ETIMEDOUT;
+	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 88177fb..488dacc 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -635,10 +635,8 @@ static void _get_entries(struct kgsl_process_private *private,
 
 static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
 		struct _mem_entry *preventry, struct _mem_entry *nextentry,
-		struct kgsl_context *context)
+		struct kgsl_process_private *private)
 {
-	struct kgsl_process_private *private;
-
 	memset(preventry, 0, sizeof(*preventry));
 	memset(nextentry, 0, sizeof(*nextentry));
 
@@ -647,8 +645,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
 
 	if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
 		_get_global_entries(faultaddr, preventry, nextentry);
-	} else if (context) {
-		private = context->proc_priv;
+	} else if (private) {
 		spin_lock(&private->mem_lock);
 		_get_entries(private, faultaddr, preventry, nextentry);
 		spin_unlock(&private->mem_lock);
@@ -687,6 +684,29 @@ static void _check_if_freed(struct kgsl_iommu_context *ctx,
 	}
 }
 
+static struct kgsl_process_private *kgsl_iommu_get_process(u64 ptbase)
+{
+	struct kgsl_process_private *p;
+	struct kgsl_iommu_pt *iommu_pt;
+
+	spin_lock(&kgsl_driver.proclist_lock);
+
+	list_for_each_entry(p, &kgsl_driver.process_list, list) {
+		iommu_pt = p->pagetable->priv;
+		if (iommu_pt->ttbr0 == ptbase) {
+			if (!kgsl_process_private_get(p))
+				p = NULL;
+
+			spin_unlock(&kgsl_driver.proclist_lock);
+			return p;
+		}
+	}
+
+	spin_unlock(&kgsl_driver.proclist_lock);
+
+	return NULL;
+}
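
The lookup pattern here — walk the list under a spinlock, take a reference before dropping the lock — can be modeled in userspace as follows; the list layout and refcounting are simplified stand-ins for the kgsl structures:

	#include <pthread.h>
	#include <stddef.h>

	struct process {
		unsigned long long ttbr0;	/* pagetable base, as in iommu_pt */
		int refcount;
		struct process *next;
	};

	static pthread_spinlock_t proclist_lock;
	static struct process *process_list;

	static struct process *get_process(unsigned long long ptbase)
	{
		struct process *p;

		pthread_spin_lock(&proclist_lock);

		for (p = process_list; p; p = p->next) {
			if (p->ttbr0 == ptbase) {
				p->refcount++;	/* pin it before dropping the lock */
				break;
			}
		}

		pthread_spin_unlock(&proclist_lock);
		return p;
	}

	int main(void)
	{
		pthread_spin_init(&proclist_lock, 0);
		return get_process(0x1000) != NULL;	/* empty list: NULL */
	}
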
+
 static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	struct device *dev, unsigned long addr, int flags, void *token)
 {
@@ -695,7 +715,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	struct kgsl_mmu *mmu = pt->mmu;
 	struct kgsl_iommu *iommu;
 	struct kgsl_iommu_context *ctx;
-	u64 ptbase, proc_ptbase;
+	u64 ptbase;
 	u32 contextidr;
 	pid_t pid = 0;
 	pid_t ptname;
@@ -705,9 +725,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	struct adreno_device *adreno_dev;
 	struct adreno_gpudev *gpudev;
 	unsigned int no_page_fault_log = 0;
-	unsigned int curr_context_id = 0;
-	struct kgsl_context *context;
 	char *fault_type = "unknown";
+	char *comm = "unknown";
+	struct kgsl_process_private *private;
 
 	static DEFINE_RATELIMIT_STATE(_rs,
 					DEFAULT_RATELIMIT_INTERVAL,
@@ -722,21 +742,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	adreno_dev = ADRENO_DEVICE(device);
 	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
-	if (pt->name == KGSL_MMU_SECURE_PT)
-		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
-
-	/*
-	 * set the fault bits and stuff before any printks so that if fault
-	 * handler runs then it will know it's dealing with a pagefault.
-	 * Read the global current timestamp because we could be in middle of
-	 * RB switch and hence the cur RB may not be reliable but global
-	 * one will always be reliable
-	 */
-	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
-		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
-
-	context = kgsl_context_get(device, curr_context_id);
-
 	write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0;
 	if (flags & IOMMU_FAULT_TRANSLATION)
 		fault_type = "translation";
@@ -747,12 +752,17 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
 		fault_type = "transaction stalled";
 
-	if (context != NULL) {
-		/* save pagefault timestamp for GFT */
-		set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv);
-		pid = context->proc_priv->pid;
+	ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
+	private = kgsl_iommu_get_process(ptbase);
+
+	if (private) {
+		pid = private->pid;
+		comm = private->comm;
 	}
 
+	if (pt->name == KGSL_MMU_SECURE_PT)
+		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
+
 	ctx->fault = 1;
 
 	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
@@ -767,9 +777,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		mutex_unlock(&device->mutex);
 	}
 
-	ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
 	contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);
-
 	ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
 		KGSL_MMU_GLOBAL_PT : pid;
 	/*
@@ -778,43 +786,19 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	 * search and delays the trace unnecessarily.
 	 */
 	trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
-			ptname,
-			context != NULL ? context->proc_priv->comm : "unknown",
-			write ? "write" : "read");
+			ptname, comm, write ? "write" : "read");
 
 	if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
 		&adreno_dev->ft_pf_policy))
 		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
 
 	if (!no_page_fault_log && __ratelimit(&_rs)) {
-		const char *api_str;
-
-		if (context != NULL) {
-			struct adreno_context *drawctxt =
-					ADRENO_CONTEXT(context);
-
-			api_str = get_api_type_str(drawctxt->type);
-		} else
-			api_str = "UNKNOWN";
-
 		dev_crit(ctx->kgsldev->dev,
 			"GPU PAGE FAULT: addr = %lX pid= %d name=%s\n", addr,
-			ptname,
-			context != NULL ? context->proc_priv->comm : "unknown");
-
-		if (context != NULL) {
-			proc_ptbase = kgsl_mmu_pagetable_get_ttbr0(
-					context->proc_priv->pagetable);
-
-			if (ptbase != proc_ptbase)
-				dev_crit(ctx->kgsldev->dev,
-				"Pagetable address mismatch: HW address is 0x%llx but SW expected 0x%llx\n",
-				ptbase, proc_ptbase);
-		}
-
+			ptname, comm);
 		dev_crit(ctx->kgsldev->dev,
-			"context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
-			ctx->name, api_str, ptbase, contextidr,
+			"context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
+			ctx->name, ptbase, contextidr,
 			write ? "write" : "read", fault_type);
 
 		if (gpudev->iommu_fault_block) {
@@ -822,9 +806,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 
 			fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
 			dev_crit(ctx->kgsldev->dev,
-				      "FAULTING BLOCK: %s\n",
-				      gpudev->iommu_fault_block(adreno_dev,
-								fsynr1));
+				"FAULTING BLOCK: %s\n",
+				gpudev->iommu_fault_block(device, fsynr1));
 		}
 
 		/* Don't print the debug if this is a permissions fault */
@@ -834,7 +817,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 			dev_err(ctx->kgsldev->dev,
 				      "---- nearby memory ----\n");
 
-			_find_mem_entries(mmu, addr, &prev, &next, context);
+			_find_mem_entries(mmu, addr, &prev, &next, private);
 			if (prev.gpuaddr)
 				_print_entry(ctx->kgsldev, &prev);
 			else
@@ -877,7 +860,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		adreno_dispatcher_schedule(device);
 	}
 
-	kgsl_context_put(context);
+	kgsl_process_private_put(private);
+
 	return ret;
 }
 
@@ -2167,14 +2151,6 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
 	return 0;
 }
 
-static struct kgsl_protected_registers *
-kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
-{
-	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
-
-	return &iommu->protect;
-}
-
 static struct kgsl_iommu_addr_entry *_find_gpuaddr(
 		struct kgsl_pagetable *pagetable, uint64_t gpuaddr)
 {
@@ -2626,15 +2602,6 @@ static int _kgsl_iommu_probe(struct kgsl_device *device,
 	iommu->regstart = reg_val[0];
 	iommu->regsize = reg_val[1];
 
-	/* Protecting the SMMU registers is mandatory */
-	if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
-		dev_err(device->dev,
-			"dt: no iommu protection range specified\n");
-		return -EINVAL;
-	}
-	iommu->protect.base = reg_val[0] / sizeof(u32);
-	iommu->protect.range = reg_val[1] / sizeof(u32);
-
 	of_property_for_each_string(node, "clock-names", prop, cname) {
 		struct clk *c = devm_clk_get(&pdev->dev, cname);
 
@@ -2722,7 +2689,6 @@ struct kgsl_mmu_ops kgsl_iommu_ops = {
 	.mmu_pt_equal = kgsl_iommu_pt_equal,
 	.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
 	.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
-	.mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
 	.mmu_init_pt = kgsl_iommu_init_pt,
 	.mmu_add_global = kgsl_iommu_add_global,
 	.mmu_remove_global = kgsl_iommu_remove_global,
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index b98f2c2..2e4c2ad 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -112,7 +112,6 @@ struct kgsl_iommu_context {
  * @clk_enable_count: The ref count of clock enable calls
  * @clks: Array of pointers to IOMMU clocks
  * @smmu_info: smmu info used in a5xx preemption
- * @protect: register protection settings for the iommu.
  */
 struct kgsl_iommu {
 	struct kgsl_iommu_context ctx[KGSL_IOMMU_CONTEXT_MAX];
@@ -123,7 +122,6 @@ struct kgsl_iommu {
 	atomic_t clk_enable_count;
 	struct clk *clks[KGSL_IOMMU_MAX_CLKS];
 	struct kgsl_memdesc smmu_info;
-	struct kgsl_protected_registers protect;
 };
 
 /*
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 637e57d..93012aa 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -68,8 +68,6 @@ struct kgsl_mmu_ops {
 	bool (*mmu_pt_equal)(struct kgsl_mmu *mmu,
 			struct kgsl_pagetable *pt, u64 ttbr0);
 	int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned long pf_policy);
-	struct kgsl_protected_registers *(*mmu_get_prot_regs)
-			(struct kgsl_mmu *mmu);
 	int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
 	void (*mmu_add_global)(struct kgsl_mmu *mmu,
 			struct kgsl_memdesc *memdesc, const char *name);
@@ -328,15 +326,6 @@ static inline void kgsl_mmu_clear_fsr(struct kgsl_mmu *mmu)
 		return mmu->mmu_ops->mmu_clear_fsr(mmu);
 }
 
-static inline struct kgsl_protected_registers *kgsl_mmu_get_prot_regs
-						(struct kgsl_mmu *mmu)
-{
-	if (MMU_OP_VALID(mmu, mmu_get_prot_regs))
-		return mmu->mmu_ops->mmu_get_prot_regs(mmu);
-
-	return NULL;
-}
-
 static inline int kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
 {
 	return MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ? 0 : 1;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index c9bf42f..6786ecb4 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1991,9 +1991,9 @@ static bool _gpu_freq_supported(struct kgsl_pwrctrl *pwr, unsigned int freq)
 	return false;
 }
 
-static void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device)
+void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device,
+		struct device *dev)
 {
-	struct device *dev = &device->pdev->dev;
 	struct dev_pm_opp *opp;
 	unsigned long freq = 0;
 	int ret;
@@ -2083,7 +2083,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 			pwr->pwrlevels[i].gpu_freq = freq;
 	}
 
-	kgsl_pwrctrl_disable_unused_opp(device);
+	kgsl_pwrctrl_disable_unused_opp(device, &pdev->dev);
 
 	kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
 
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index f3a5648..6dc7c53 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -105,6 +105,7 @@ struct kgsl_pwrlevel {
 	unsigned int bus_freq;
 	unsigned int bus_min;
 	unsigned int bus_max;
+	unsigned int acd_level;
 };
 
 struct kgsl_regulator {
@@ -267,4 +268,7 @@ void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
 void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
 			unsigned long timeout_us);
 void kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device);
+void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device,
+		struct device *dev);
+
 #endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 89b227b..790b379 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -1041,6 +1041,12 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor)
 		 * frequency.
 		 */
 		ret = dev_pm_opp_of_add_table(device->busmondev);
+		/*
+		 * Disable OPPs which are not supported as per the GPU freq
+		 * plan. This is needed to ensure the freq_table specified in
+		 * the bus_profile above matches the OPP table.
+		 */
+		kgsl_pwrctrl_disable_unused_opp(device, device->busmondev);
 		if (!ret)
 			bus_devfreq = devfreq_add_device(device->busmondev,
 				&pwrscale->bus_profile.profile, "gpubw_mon",
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 9428ea7..c52bd16 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -26,12 +26,36 @@
 #define A4_2WHEEL_MOUSE_HACK_7	0x01
 #define A4_2WHEEL_MOUSE_HACK_B8	0x02
 
+#define A4_WHEEL_ORIENTATION	(HID_UP_GENDESK | 0x000000b8)
+
 struct a4tech_sc {
 	unsigned long quirks;
 	unsigned int hw_wheel;
 	__s32 delayed_value;
 };
 
+static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+			    struct hid_field *field, struct hid_usage *usage,
+			    unsigned long **bit, int *max)
+{
+	struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+
+	if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
+	    usage->hid == A4_WHEEL_ORIENTATION) {
+		/*
+		 * We do not want to have this usage mapped to anything as it's
+		 * nonstandard and doesn't really behave like an HID report.
+		 * It's only selecting the orientation (vertical/horizontal) of
+		 * the previous mouse wheel report. The input_events will be
+		 * generated once both reports are recorded in a4_event().
+		 */
+		return -1;
+	}
+
+	return 0;
+}
+
 static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
 	struct a4tech_sc *a4 = hid_get_drvdata(hdev);
 	struct input_dev *input;
 
-	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
-			!usage->type)
+	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
 		return 0;
 
 	input = field->hidinput->input;
@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
 			return 1;
 		}
 
-		if (usage->hid == 0x000100b8) {
+		if (usage->hid == A4_WHEEL_ORIENTATION) {
 			input_event(input, EV_REL, value ? REL_HWHEEL :
 					REL_WHEEL, a4->delayed_value);
 			return 1;
@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
 static struct hid_driver a4_driver = {
 	.name = "a4tech",
 	.id_table = a4_devices,
+	.input_mapping = a4_input_mapping,
 	.input_mapped = a4_input_mapped,
 	.event = a4_event,
 	.probe = a4_probe,
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 6e1a4a4..ab9da59 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
 
 	/* Locate the boot interface, to receive the LED change events */
 	struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+	struct hid_device *boot_hid;
+	struct hid_input *boot_hid_input;
 
-	struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
-	struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+	if (unlikely(boot_interface == NULL))
+		return -ENODEV;
+
+	boot_hid = usb_get_intfdata(boot_interface);
+	boot_hid_input = list_first_entry(&boot_hid->inputs,
 		struct hid_input, list);
 
 	return boot_hid_input->input->event(boot_hid_input->input, type, code,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0e1c1e4..bb35715 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -971,6 +971,7 @@
 #define USB_DEVICE_ID_SAITEK_RAT7	0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9	0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7	0x0cd0
+#define USB_DEVICE_ID_SAITEK_X52	0x075c
 
 #define USB_VENDOR_ID_SAMSUNG		0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index bbb5733..f032911 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -143,6 +143,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 9671a4b..31f1023 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -587,10 +587,14 @@ static void sony_set_leds(struct sony_sc *sc);
 static inline void sony_schedule_work(struct sony_sc *sc,
 				      enum sony_worker which)
 {
+	unsigned long flags;
+
 	switch (which) {
 	case SONY_WORKER_STATE:
-		if (!sc->defer_initialization)
+		spin_lock_irqsave(&sc->lock, flags);
+		if (!sc->defer_initialization && sc->state_worker_initialized)
 			schedule_work(&sc->state_worker);
+		spin_unlock_irqrestore(&sc->lock, flags);
 		break;
 	case SONY_WORKER_HOTPLUG:
 		if (sc->hotplug_worker_initialized)
@@ -2553,13 +2557,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
 
 static inline void sony_cancel_work_sync(struct sony_sc *sc)
 {
+	unsigned long flags;
+
 	if (sc->hotplug_worker_initialized)
 		cancel_work_sync(&sc->hotplug_worker);
-	if (sc->state_worker_initialized)
+	if (sc->state_worker_initialized) {
+		spin_lock_irqsave(&sc->lock, flags);
+		sc->state_worker_initialized = 0;
+		spin_unlock_irqrestore(&sc->lock, flags);
 		cancel_work_sync(&sc->state_worker);
+	}
 }
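
The ordering being enforced above: clear state_worker_initialized under the lock first, so a concurrent sony_schedule_work() can no longer queue new work, and only then call cancel_work_sync(). A minimal model of the same idiom, with illustrative names and the work-queue calls left as comments:

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t sc_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool state_worker_initialized = true;

	static void sony_schedule_state_work(void)
	{
		pthread_mutex_lock(&sc_lock);
		if (state_worker_initialized) {
			/* schedule_work(&sc->state_worker) would go here */
		}
		pthread_mutex_unlock(&sc_lock);
	}

	static void sony_cancel_state_work(void)
	{
		/* Step 1: block new submissions under the lock */
		pthread_mutex_lock(&sc_lock);
		state_worker_initialized = false;
		pthread_mutex_unlock(&sc_lock);

		/* Step 2: only now wait for anything already queued */
		/* cancel_work_sync(&sc->state_worker) would go here */
	}

	int main(void)
	{
		sony_schedule_state_work();
		sony_cancel_state_work();
		return 0;
	}
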
 
-
 static int sony_input_configured(struct hid_device *hdev,
 					struct hid_input *hidinput)
 {
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index bea8def..30b8c32 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -34,6 +34,8 @@
 
 #include "hid-ids.h"
 
+#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT	0xb320
+
 static const signed short ff_rumble[] = {
 	FF_RUMBLE,
 	-1
@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
 	struct hid_field *ff_field = tmff->ff_field;
 	int x, y;
 	int left, right;	/* Rumbling */
+	int motor_swap;
 
 	switch (effect->type) {
 	case FF_CONSTANT:
@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
 					ff_field->logical_minimum,
 					ff_field->logical_maximum);
 
+		/* 2-in-1 strong motor is left */
+		if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
+			motor_swap = left;
+			left = right;
+			right = motor_swap;
+		}
+
 		dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
 		ff_field->value[0] = left;
 		ff_field->value[1] = right;
@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
 		.driver_data = (unsigned long)ff_rumble },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304),   /* FireStorm Dual Power 2 (and 3) */
 		.driver_data = (unsigned long)ff_rumble },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT),   /* Dual Trigger 2-in-1 */
+		.driver_data = (unsigned long)ff_rumble },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323),   /* Dual Trigger 3-in-1 (PC Mode) */
 		.driver_data = (unsigned long)ff_rumble },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324),   /* Dual Trigger 3-in-1 (PS3 Mode) */
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index a746017..5a949ca 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
 	spin_unlock_irq(&list->hiddev->list_lock);
 
 	mutex_lock(&hiddev->existancelock);
+	/*
+	 * Recheck the exist flag with the existancelock held to
+	 * avoid opening a disconnected device.
+	 */
+	if (!list->hiddev->exist) {
+		res = -ENODEV;
+		goto bail_unlock;
+	}
 	if (!list->hiddev->open++)
 		if (list->hiddev->exist) {
 			struct hid_device *hid = hiddev->hid;
@@ -313,6 +321,10 @@ static int hiddev_open(struct inode *inode, struct file *file)
 	hid_hw_power(hid, PM_HINT_NORMAL);
 bail_unlock:
 	mutex_unlock(&hiddev->existancelock);
+
+	spin_lock_irq(&list->hiddev->list_lock);
+	list_del(&list->node);
+	spin_unlock_irq(&list->hiddev->list_lock);
 bail:
 	file->private_data = NULL;
 	vfree(list);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index e56dc97..50ef7b6 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
 		y >>= 1;
 		distance >>= 1;
 	}
+	if (features->type == INTUOSHT2)
+		distance = features->distance_max - distance;
 	input_report_abs(input, ABS_X, x);
 	input_report_abs(input, ABS_Y, y);
 	input_report_abs(input, ABS_DISTANCE, distance);
@@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
 	input_report_key(input, BTN_BASE2, (data[11] & 0x02));
 
 	if (data[12] & 0x80)
-		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
 	else
 		input_report_abs(input, ABS_WHEEL, 0);
 
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 2f164bd..fdb0f83 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -38,7 +38,7 @@
 
 static unsigned long virt_to_hvpfn(void *addr)
 {
-	unsigned long paddr;
+	phys_addr_t paddr;
 
 	if (is_vmalloc_addr(addr))
 		paddr = page_to_phys(vmalloc_to_page(addr)) +
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 78603b7..eba692c 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -818,7 +818,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
 static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
 static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
 
@@ -3673,6 +3673,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
 		data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
 		data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+		data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
 		data->REG_PWM[0] = NCT6106_REG_PWM;
 		data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
 		data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 2876c18..38ffbdb 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
 	&sensor_dev_attr_in3_alarm.dev_attr.attr,
 	&sensor_dev_attr_in3_beep.dev_attr.attr,
 
-	&sensor_dev_attr_in4_input.dev_attr.attr,	/* 17 */
+	&sensor_dev_attr_in4_input.dev_attr.attr,	/* 16 */
 	&sensor_dev_attr_in4_min.dev_attr.attr,
 	&sensor_dev_attr_in4_max.dev_attr.attr,
 	&sensor_dev_attr_in4_alarm.dev_attr.attr,
@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
 
 	if (index >= 6 && index < 11 && (reg & 0x03) != 0x03)	/* VSEN1 */
 		return 0;
-	if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c)	/* VSEN2 */
+	if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c)	/* VSEN2 */
 		return 0;
-	if (index >= 17 && (reg & 0x30) != 0x30)		/* VSEN3 */
+	if (index >= 16 && (reg & 0x30) != 0x30)		/* VSEN3 */
 		return 0;
 
 	return attr->mode;
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index cda385a..dfb5638 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -123,6 +123,17 @@
 	  hardware component to another. It can also be used to pass
 	  software generated events.
 
+config CORESIGHT_CTI_SAVE_DISABLE
+	bool "Turn off CTI save and restore"
+	depends on CORESIGHT_CTI
+	help
+	  Turns off CoreSight CTI save and restore support for CPU CTIs. This
+	  avoids voting for the clocks during probe as well as the associated
+	  save and restore latency, at the cost of breaking CPU CTI support on
+	  targets where CPU CTIs have to be preserved across power collapse.
+
+	  If unsure, say 'N' here to avoid breaking CPU CTI support.
+
 config CORESIGHT_OST
 	bool "CoreSight OST framework"
 	depends on CORESIGHT_STM
diff --git a/drivers/hwtracing/coresight/coresight-common.h b/drivers/hwtracing/coresight/coresight-common.h
index b49a588..b6db835 100644
--- a/drivers/hwtracing/coresight/coresight-common.h
+++ b/drivers/hwtracing/coresight/coresight-common.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CORESIGHT_COMMON_H
@@ -16,6 +16,7 @@ struct coresight_csr {
 
 #ifdef CONFIG_CORESIGHT_CSR
 extern void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr);
+extern void msm_qdss_csr_enable_flush(struct coresight_csr *csr);
 extern void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr);
 extern void msm_qdss_csr_disable_flush(struct coresight_csr *csr);
 extern int coresight_csr_hwctrl_set(struct coresight_csr *csr, uint64_t addr,
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index 309e5a0..e0d023719 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2013, 2015-2-17 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2015-2017, 2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -66,12 +66,15 @@ do {									\
 #define BLKSIZE_1024		2
 #define BLKSIZE_2048		3
 
+#define FLUSHPERIOD_2048	0x800
+
 struct csr_drvdata {
 	void __iomem		*base;
 	phys_addr_t		pbase;
 	struct device		*dev;
 	struct coresight_device	*csdev;
 	uint32_t		blksize;
+	uint32_t		flushperiod;
 	struct coresight_csr		csr;
 	struct clk		*clk;
 	spinlock_t		spin_lock;
@@ -79,6 +82,7 @@ struct csr_drvdata {
 	bool			hwctrl_set_support;
 	bool			set_byte_cntr_support;
 	bool			timestamp_support;
+	bool			enable_flush;
 };
 
 static LIST_HEAD(csr_list);
@@ -86,10 +90,23 @@ static DEFINE_MUTEX(csr_lock);
 
 #define to_csr_drvdata(c) container_of(c, struct csr_drvdata, csr)
 
+static void msm_qdss_csr_config_flush_period(struct csr_drvdata *drvdata)
+{
+	uint32_t usbflshctrl;
+
+	CSR_UNLOCK(drvdata);
+
+	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+	usbflshctrl = (usbflshctrl & ~0x3FFFC) | (drvdata->flushperiod << 2);
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+	CSR_LOCK(drvdata);
+}
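
The read-modify-write above updates only bits [17:2] of USBFLSHCTRL (the flush-period field) while preserving the rest of the register. In isolation, with a hypothetical register value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int usbflshctrl = 0xdead0003;	/* hypothetical register */
		unsigned int flushperiod = 0x800;	/* FLUSHPERIOD_2048 */

		/* Clear bits [17:2], then place the new period at bit 2 */
		usbflshctrl = (usbflshctrl & ~0x3FFFC) | (flushperiod << 2);

		printf("USBFLSHCTRL = %#x\n", usbflshctrl);
		return 0;
	}
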
+
 void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
 {
 	struct csr_drvdata *drvdata;
-	uint32_t usbbamctrl, usbflshctrl;
+	uint32_t usbbamctrl;
 	unsigned long flags;
 
 	if (csr == NULL)
@@ -106,12 +123,6 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
 	usbbamctrl = (usbbamctrl & ~0x3) | drvdata->blksize;
 	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
 
-	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
-	usbflshctrl = (usbflshctrl & ~0x3FFFC) | (0xFFFF << 2);
-	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
-	usbflshctrl |= 0x2;
-	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
-
 	usbbamctrl |= 0x4;
 	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
 
@@ -120,6 +131,36 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
 }
 EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
 
+void msm_qdss_csr_enable_flush(struct coresight_csr *csr)
+{
+	struct csr_drvdata *drvdata;
+	uint32_t usbflshctrl;
+	unsigned long flags;
+
+	if (csr == NULL)
+		return;
+
+	drvdata = to_csr_drvdata(csr);
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support)
+		return;
+
+	spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+	msm_qdss_csr_config_flush_period(drvdata);
+
+	CSR_UNLOCK(drvdata);
+
+	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+	usbflshctrl |= 0x2;
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+	CSR_LOCK(drvdata);
+	drvdata->enable_flush = true;
+	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+}
+EXPORT_SYMBOL(msm_qdss_csr_enable_flush);
+
 void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr)
 {
 	struct csr_drvdata *drvdata;
@@ -166,6 +207,7 @@ void msm_qdss_csr_disable_flush(struct coresight_csr *csr)
 	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
 
 	CSR_LOCK(drvdata);
+	drvdata->enable_flush = false;
 	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
 }
 EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
@@ -295,14 +337,66 @@ static ssize_t timestamp_show(struct device *dev,
 
 static DEVICE_ATTR_RO(timestamp);
 
+static ssize_t flushperiod_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+		dev_err(dev, "Invalid param\n");
+		return -EINVAL;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%#x\n", drvdata->flushperiod);
+}
+
+static ssize_t flushperiod_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t size)
+{
+	unsigned long flags;
+	unsigned long val;
+	struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+		dev_err(dev, "Invalid param\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+	if (kstrtoul(buf, 0, &val) || val > 0xffff) {
+		spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	if (drvdata->flushperiod == val)
+		goto out;
+
+	drvdata->flushperiod = val;
+
+	if (drvdata->enable_flush)
+		msm_qdss_csr_config_flush_period(drvdata);
+
+out:
+	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+	return size;
+}
+
+static DEVICE_ATTR_RW(flushperiod);
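
Userspace would drive the new attribute through sysfs. A hedged example follows; the device path is hypothetical and platform-dependent:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path is illustrative; the real node name varies by platform */
		const char *node = "/sys/bus/coresight/devices/csr/flushperiod";
		char buf[16];
		ssize_t n;
		int fd = open(node, O_RDWR);

		if (fd < 0)
			return 1;

		/* The store accepts 0..0xffff; larger values return -EINVAL */
		write(fd, "0x400", 5);

		n = pread(fd, buf, sizeof(buf) - 1, 0);
		if (n > 0) {
			buf[n] = '\0';
			printf("flushperiod: %s", buf);
		}

		close(fd);
		return 0;
	}
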
+
 static struct attribute *csr_attrs[] = {
 	&dev_attr_timestamp.attr,
+	&dev_attr_flushperiod.attr,
 	NULL,
 };
 
 static struct attribute_group csr_attr_grp = {
 	.attrs = csr_attrs,
 };
+
 static const struct attribute_group *csr_attr_grps[] = {
 	&csr_attr_grp,
 	NULL,
@@ -374,14 +468,16 @@ static int csr_probe(struct platform_device *pdev)
 	else
 		dev_dbg(dev, "timestamp_support operation supported\n");
 
+	if (drvdata->usb_bam_support)
+		drvdata->flushperiod = FLUSHPERIOD_2048;
+
 	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return -ENOMEM;
 	desc->type = CORESIGHT_DEV_TYPE_NONE;
 	desc->pdata = pdev->dev.platform_data;
 	desc->dev = &pdev->dev;
-	if (drvdata->timestamp_support)
-		desc->groups = csr_attr_grps;
+	desc->groups = csr_attr_grps;
 
 	drvdata->csdev = coresight_register(desc);
 	if (IS_ERR(drvdata->csdev))
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 4217742..f98abee 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1015,11 +1015,12 @@ static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
 	tmc_sync_etr_buf(drvdata);
 }
 
-void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata, bool flush)
 {
 	CS_UNLOCK(drvdata->base);
 
-	tmc_flush_and_stop(drvdata);
+	if (flush)
+		tmc_flush_and_stop(drvdata);
 	/*
 	 * When operating in sysFS mode the content of the buffer needs to be
 	 * read before the TMC is disabled.
@@ -1114,6 +1115,7 @@ static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
 
 	CS_LOCK(drvdata->base);
 
+	msm_qdss_csr_enable_flush(drvdata->csr);
 	drvdata->enable_to_bam = true;
 }
 
@@ -1442,7 +1444,7 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
 	return -EINVAL;
 }
 
-static void tmc_disable_etr_sink(struct coresight_device *csdev)
+static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
 {
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -1468,10 +1470,10 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
 				goto out;
 			} else {
 				usb_qdss_close(drvdata->usbch);
-				tmc_etr_disable_hw(drvdata);
+				tmc_etr_disable_hw(drvdata, flush);
 			}
 		} else {
-			tmc_etr_disable_hw(drvdata);
+			tmc_etr_disable_hw(drvdata, flush);
 		}
 		drvdata->mode = CS_MODE_DISABLED;
 	}
@@ -1506,6 +1508,11 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
 	dev_info(drvdata->dev, "TMC-ETR disabled\n");
 }
 
+static void tmc_disable_etr_sink(struct coresight_device *csdev)
+{
+	_tmc_disable_etr_sink(csdev, true);
+}
+
 int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
 {
 	enum tmc_etr_out_mode new_mode, old_mode;
@@ -1525,7 +1532,7 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
 		return 0;
 	}
 
-	tmc_disable_etr_sink(drvdata->csdev);
+	_tmc_disable_etr_sink(drvdata->csdev, false);
 	old_mode = drvdata->out_mode;
 	drvdata->out_mode = new_mode;
 	if (tmc_enable_etr_sink_sysfs(drvdata->csdev)) {
@@ -1587,7 +1594,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 
 	/* Disable the TMC if need be */
 	if (drvdata->mode == CS_MODE_SYSFS)
-		tmc_etr_disable_hw(drvdata);
+		tmc_etr_disable_hw(drvdata, true);
 
 	drvdata->reading = true;
 out:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 994339a..a30e360 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -312,7 +312,7 @@ void tmc_free_etr_buf(struct etr_buf *etr_buf);
 void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata);
 void tmc_etr_bam_disable(struct tmc_drvdata *drvdata);
 void tmc_etr_enable_hw(struct tmc_drvdata *drvdata);
-void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata, bool flush);
 void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
 		  struct usb_qdss_ch *ch);
 int tmc_etr_bam_init(struct amba_device *adev,
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 063e89e..c776a35 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -328,7 +328,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
 		.modified = 1,					        \
 		.info_mask_separate =					\
 			BIT(IIO_CHAN_INFO_RAW) |			\
-			BIT(IIO_CHAN_INFO_SCALE) |			\
 			BIT(IIO_CHAN_INFO_CALIBBIAS),			\
 		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE),	\
 		.ext_info = cros_ec_accel_legacy_ext_info,		\
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 0538ff8..49c1956 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -86,7 +86,7 @@
 #define MAX9611_TEMP_MAX_POS		0x7f80
 #define MAX9611_TEMP_MAX_NEG		0xff80
 #define MAX9611_TEMP_MIN_NEG		0xd980
-#define MAX9611_TEMP_MASK		GENMASK(7, 15)
+#define MAX9611_TEMP_MASK		GENMASK(15, 7)
 #define MAX9611_TEMP_SHIFT		0x07
 #define MAX9611_TEMP_RAW(_r)		((_r) >> MAX9611_TEMP_SHIFT)
 #define MAX9611_TEMP_SCALE_NUM		1000000
@@ -483,7 +483,7 @@ static int max9611_init(struct max9611_dev *max9611)
 	if (ret)
 		return ret;
 
-	regval = ret & MAX9611_TEMP_MASK;
+	regval &= MAX9611_TEMP_MASK;
 
 	if ((regval > MAX9611_TEMP_MAX_POS &&
 	     regval < MAX9611_TEMP_MIN_NEG) ||
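
A note on the max9611 change above: GENMASK(h, l) takes the high bit first, so GENMASK(7, 15) expands to an empty mask and the temperature field was never extracted; the second hunk likewise masks regval itself rather than the i2c helper's return code. A minimal userspace sketch of the mask arithmetic, with the kernel macro from include/linux/bits.h copied in simplified form:

#include <stdint.h>
#include <stdio.h>

/* Simplified userspace copy of the kernel's GENMASK for unsigned long. */
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* Correct order: high bit 15 down to low bit 7 -> 0xff80. */
	printf("GENMASK(15, 7) = %#lx\n", GENMASK(15, 7));
	/* Reversed arguments silently produce an empty mask. */
	printf("GENMASK(7, 15) = %#lx\n", GENMASK(7, 15));
	return 0;
}
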
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef459f2..7586c1d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3182,18 +3182,18 @@ static int ib_mad_port_open(struct ib_device *device,
 	if (has_smi)
 		cq_size *= 2;
 
+	port_priv->pd = ib_alloc_pd(device, 0);
+	if (IS_ERR(port_priv->pd)) {
+		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+		ret = PTR_ERR(port_priv->pd);
+		goto error3;
+	}
+
 	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
 			IB_POLL_WORKQUEUE);
 	if (IS_ERR(port_priv->cq)) {
 		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
 		ret = PTR_ERR(port_priv->cq);
-		goto error3;
-	}
-
-	port_priv->pd = ib_alloc_pd(device, 0);
-	if (IS_ERR(port_priv->pd)) {
-		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
-		ret = PTR_ERR(port_priv->pd);
 		goto error4;
 	}
 
@@ -3236,11 +3236,11 @@ static int ib_mad_port_open(struct ib_device *device,
 error7:
 	destroy_mad_qp(&port_priv->qp_info[0]);
 error6:
-	ib_dealloc_pd(port_priv->pd);
-error4:
 	ib_free_cq(port_priv->cq);
 	cleanup_recv_queue(&port_priv->qp_info[1]);
 	cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+	ib_dealloc_pd(port_priv->pd);
 error3:
 	kfree(port_priv);
 
@@ -3270,8 +3270,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 	destroy_workqueue(port_priv->wq);
 	destroy_mad_qp(&port_priv->qp_info[1]);
 	destroy_mad_qp(&port_priv->qp_info[0]);
-	ib_dealloc_pd(port_priv->pd);
 	ib_free_cq(port_priv->cq);
+	ib_dealloc_pd(port_priv->pd);
 	cleanup_recv_queue(&port_priv->qp_info[1]);
 	cleanup_recv_queue(&port_priv->qp_info[0]);
 	/* XXX: Handle deallocation of MAD registration tables */
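
The mad.c hunks above restore the rule that teardown mirrors allocation: the PD is now allocated before the CQ, and both the error labels and ib_mad_port_close() release them in strict reverse order. A hedged userspace skeleton of the same goto-unwind shape (resource names are illustrative, not the ib_mad ones):

#include <stdlib.h>

struct ctx { void *pd, *cq; };

/* Acquire in order; on failure unwind newest-first via fallthrough labels. */
static int ctx_open(struct ctx *c)
{
	c->pd = malloc(64);		/* acquired first ... */
	if (!c->pd)
		goto err_pd;
	c->cq = malloc(64);		/* ... acquired second */
	if (!c->cq)
		goto err_cq;
	return 0;

err_cq:
	free(c->pd);
err_pd:
	return -1;
}

static void ctx_close(struct ctx *c)
{
	free(c->cq);			/* last acquired, first released */
	free(c->pd);
}

int main(void)
{
	struct ctx c;

	if (ctx_open(&c) == 0)
		ctx_close(&c);
	return 0;
}
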
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c34a685..a18f3f8 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 
@@ -868,11 +869,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 
 	if (get_user(id, arg))
 		return -EFAULT;
+	if (id >= IB_UMAD_MAX_AGENTS)
+		return -EINVAL;
 
 	mutex_lock(&file->port->file_mutex);
 	mutex_lock(&file->mutex);
 
-	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+	id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+	if (!__get_agent(file, id)) {
 		ret = -EINVAL;
 		goto out;
 	}
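
The user_mad.c change is the usual Spectre-v1 hardening shape: reject the untrusted index architecturally, then clamp it with array_index_nospec() so a mispredicted branch cannot form an out-of-bounds load. The real helper in <linux/nospec.h> uses arch-specific masking; the portable sketch below only illustrates the idea (arithmetic right shift of a negative value is assumed to sign-extend, as it does on the usual targets):

#include <stdint.h>
#include <stdio.h>

/* All-ones mask when idx < size, all-zeroes otherwise, without a branch. */
static inline size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)((intptr_t)(idx - size) >> (sizeof(size_t) * 8 - 1));

	return idx & mask;	/* clamps to 0 when idx >= size */
}

int main(void)
{
	int agents[8] = { 0 };
	size_t id = 5;			/* pretend this came from userspace */

	if (id >= 8)			/* architectural bounds check */
		return -1;
	id = index_nospec(id, 8);	/* speculative clamp */
	printf("agents[%zu] = %d\n", id, agents[id]);
	return 0;
}
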
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9bab4fb..bd1fdad 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
 	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-	return order <= mr_cache_max_order(dev) &&
-		umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1305,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr = NULL;
-	bool populate_mtts = false;
+	bool use_umr;
 	struct ib_umem *umem;
 	int page_shift;
 	int npages;
@@ -1338,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	if (use_umr(dev, order)) {
+	use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+		  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+		   !MLX5_CAP_GEN(dev->mdev, atomic));
+
+	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
-		populate_mtts = false;
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
 			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
-		populate_mtts = true;
+		use_umr = false;
 	}
 
 	if (!mr) {
-		if (!umr_can_modify_entity_size(dev))
-			populate_mtts = true;
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags, populate_mtts);
+				page_shift, access_flags, !use_umr);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
 
@@ -1378,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	if (!populate_mtts) {
+	if (use_umr) {
 		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
 		if (access_flags & IB_ACCESS_ON_DEMAND)
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 7807325..c431df7 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
 		return -ENODEV;
 
 	epirq = &interface->endpoint[0].desc;
+	if (!usb_endpoint_is_int_in(epirq))
+		return -ENODEV;
+
 	epout = &interface->endpoint[1].desc;
+	if (!usb_endpoint_is_int_out(epout))
+		return -ENODEV;
 
 	if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
 		goto fail;
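
The iforce probe fix validates the endpoint types before touching them, since a crafted device can present any descriptor layout. What usb_endpoint_is_int_in()/..._int_out() test reduces to a couple of bit checks on bmAttributes and bEndpointAddress; a trimmed-down, self-contained version (constants per the USB spec, struct reduced to the two relevant fields):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USB_ENDPOINT_XFERTYPE_MASK	0x03
#define USB_ENDPOINT_XFER_INT		0x03
#define USB_ENDPOINT_DIR_MASK		0x80
#define USB_DIR_IN			0x80

struct ep_desc {			/* trimmed endpoint descriptor */
	uint8_t bEndpointAddress;
	uint8_t bmAttributes;
};

static bool ep_is_int_in(const struct ep_desc *ep)
{
	return (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
			USB_ENDPOINT_XFER_INT &&
	       (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
}

int main(void)
{
	struct ep_desc irq_in   = { .bEndpointAddress = 0x81, .bmAttributes = 0x03 };
	struct ep_desc bulk_out = { .bEndpointAddress = 0x01, .bmAttributes = 0x02 };

	printf("irq_in: %d, bulk_out: %d\n",
	       ep_is_int_in(&irq_in), ep_is_int_in(&bulk_out));
	return 0;
}
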
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 530142b..eb9b9de 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1810,6 +1810,30 @@ static int elantech_create_smbus(struct psmouse *psmouse,
 				  leave_breadcrumbs);
 }
 
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+				     struct elantech_device_info *info)
+{
+	if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+		return true;
+
+	switch (info->bus) {
+	case ETP_BUS_PS2_ONLY:
+		/* expected case */
+		break;
+	case ETP_BUS_SMB_HST_NTFY_ONLY:
+	case ETP_BUS_PS2_SMB_HST_NTFY:
+		/* SMBus implementation is stable since 2018 */
+		if (dmi_get_bios_year() >= 2018)
+			return true;
+	default:
+		psmouse_dbg(psmouse,
+			    "Ignoring SMBus bus provider %d\n", info->bus);
+		break;
+	}
+
+	return false;
+}
+
 /**
  * elantech_setup_smbus - called once the PS/2 devices are enumerated
  * and decides to instantiate an SMBus InterTouch device.
@@ -1829,7 +1853,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
 		 * i2c_blacklist_pnp_ids.
 		 * Old ICs are up to the user to decide.
 		 */
-		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+		if (!elantech_use_host_notify(psmouse, info) ||
 		    psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
 			return -ENXIO;
 	}
@@ -1849,34 +1873,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
 	return 0;
 }
 
-static bool elantech_use_host_notify(struct psmouse *psmouse,
-				     struct elantech_device_info *info)
-{
-	if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
-		return true;
-
-	switch (info->bus) {
-	case ETP_BUS_PS2_ONLY:
-		/* expected case */
-		break;
-	case ETP_BUS_SMB_ALERT_ONLY:
-		/* fall-through  */
-	case ETP_BUS_PS2_SMB_ALERT:
-		psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
-		break;
-	case ETP_BUS_SMB_HST_NTFY_ONLY:
-		/* fall-through  */
-	case ETP_BUS_PS2_SMB_HST_NTFY:
-		return true;
-	default:
-		psmouse_dbg(psmouse,
-			    "Ignoring SMBus bus provider %d.\n",
-			    info->bus);
-	}
-
-	return false;
-}
-
 int elantech_init_smbus(struct psmouse *psmouse)
 {
 	struct elantech_device_info info;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index af7d484..06cebde 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -185,6 +185,7 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN2055", /* E580 */
 	"SYN3052", /* HP EliteBook 840 G4 */
 	"SYN3221", /* HP 15-ay000 */
+	"SYN323d", /* HP Spectre X360 13-w013dx */
 	NULL
 };
 
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 10a0391..538986e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -161,7 +161,8 @@ struct trackpoint_data {
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
 int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
 #else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+				    bool set_properties)
 {
 	return -ENOSYS;
 }
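
The trackpoint.h fix adds the `static` the stub was missing. Under C99 semantics a bare `inline` definition in a header emits no out-of-line symbol, so any caller the compiler declines to inline becomes an undefined reference; `static inline` gives every includer its own copy. A compilable sketch of the conditional-stub idiom (the config symbol here is made up for illustration):

#include <stdio.h>

/* With the feature compiled out, callers get a static inline stub. */
#ifndef CONFIG_EXAMPLE_FEATURE
static inline int feature_detect(void *dev)
{
	(void)dev;
	return -38;	/* -ENOSYS: operation not available */
}
#endif

int main(void)
{
	printf("detect -> %d\n", feature_detect(NULL));
	return 0;
}
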
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 75b5006..b1cf0c9 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
 	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
 		return -ENODEV;
 
+	endpoint = &intf->cur_altsetting->endpoint[0].desc;
+	if (!usb_endpoint_is_int_in(endpoint))
+		return -ENODEV;
+
 	kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
 	input_dev = input_allocate_device();
 	if (!kbtab || !input_dev)
@@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
 	input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
 	input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
 
-	endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
 	usb_fill_int_urb(kbtab->irq, dev,
 			 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
 			 kbtab->data, 8,
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index d61570d..48304e2 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1672,6 +1672,8 @@ static int usbtouch_probe(struct usb_interface *intf,
 	if (!usbtouch || !input_dev)
 		goto out_free;
 
+	mutex_init(&usbtouch->pm_mutex);
+
 	type = &usbtouch_dev_info[id->driver_info];
 	usbtouch->type = type;
 	if (!type->process_pkt)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3a1d303..66b4800 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1710,7 +1710,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
 	NULL,
 };
 
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
 	u32 range, misc, low, high;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ee30e89..9ba73e1 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2883,7 +2883,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 
 	if (!its_alloc_vpe_table(vpe_id)) {
 		its_vpe_id_free(vpe_id);
-		its_free_pending_table(vpe->vpt_page);
+		its_free_pending_table(vpt_page);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 4760307..cef8f5e 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
 	.irq_unmask		= imx_gpcv2_irq_unmask,
 	.irq_set_wake		= imx_gpcv2_irq_set_wake,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 060dc7f..c952002 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1406,6 +1406,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
 				printk(KERN_DEBUG
 				       "%s: %s: alloc urb for fifo %i failed",
 				       hw->name, __func__, fifo->fifonum);
+				continue;
 			}
 			fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
 			fifo->iso[i].indx = i;
@@ -1704,13 +1705,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
 static int
 setup_hfcsusb(struct hfcsusb *hw)
 {
+	void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
 	u_char b;
+	int ret;
 
 	if (debug & DBG_HFC_CALL_TRACE)
 		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
 
+	if (!dmabuf)
+		return -ENOMEM;
+
+	ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
+
+	memcpy(&b, dmabuf, sizeof(u_char));
+	kfree(dmabuf);
+
 	/* check the chip id */
-	if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+	if (ret != 1) {
 		printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
 		       hw->name, __func__);
 		return 1;
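
The hfcsusb change moves the one-byte transfer buffer off the stack: USB transfer buffers must be DMA-capable, and with VMAP_STACK the stack may live in vmalloc space where DMA is not possible. A kernel-style sketch of the bounce-buffer shape (compilable only in the driver's context; struct hfcsusb, HFCUSB_CHIP_ID and read_reg_atomic() are the driver's own, as seen above):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int read_chip_id(struct hfcsusb *hw, u8 *out)
{
	u8 *dmabuf = kmalloc(sizeof(*dmabuf), GFP_KERNEL);	/* heap is DMA-able */
	int ret;

	if (!dmabuf)
		return -ENOMEM;

	ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);	/* USB transfer */
	memcpy(out, dmabuf, sizeof(*dmabuf));			/* hand back a copy */
	kfree(dmabuf);

	return ret == 1 ? 0 : -EIO;
}
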
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index f6fd115..b3cb7fe 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -132,6 +132,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
 	{ .compatible = "qcom,sm8150-apcs-hmss-global", .data = (void *) 12 },
 	{ .compatible = "qcom,sm8150-spcs-global", .data = (void *)0 },
 	{ .compatible = "qcom,kona-spcs-global", .data = (void *)0 },
+	{ .compatible = "qcom,bengal-apcs-hmss-global", .data = (void *)8 },
 	{}
 };
 MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b1d0ae2..dc385b7 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1602,7 +1602,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	unsigned long freed;
 
 	c = container_of(shrink, struct dm_bufio_client, shrinker);
-	if (!dm_bufio_trylock(c))
+	if (sc->gfp_mask & __GFP_FS)
+		dm_bufio_lock(c);
+	else if (!dm_bufio_trylock(c))
 		return SHRINK_STOP;
 
 	freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
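
The dm-bufio change lets the shrinker sleep on the lock whenever the allocation context permits filesystem recursion (__GFP_FS set), falling back to trylock only for contexts that must not block; before this, any contention made the scan bail with SHRINK_STOP and reclaim could stall. The decision shape, as a userspace pthread analogue (illustrative only; the kernel types and flags differ):

#include <pthread.h>
#include <stdbool.h>

#define GFP_FS 0x1	/* stand-in for the kernel's __GFP_FS bit */

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns false when the scan must be skipped (the SHRINK_STOP case). */
static bool shrink_lock(unsigned int gfp_mask)
{
	if (gfp_mask & GFP_FS) {
		pthread_mutex_lock(&cache_lock);	/* safe to block here */
		return true;
	}
	/* FS recursion forbidden: never block, bail out if contended. */
	return pthread_mutex_trylock(&cache_lock) == 0;
}

int main(void)
{
	if (shrink_lock(GFP_FS))
		pthread_mutex_unlock(&cache_lock);
	return 0;
}
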
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7d480c9..7e426e4 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -130,6 +130,7 @@ struct mapped_device {
 };
 
 int md_in_flight(struct mapped_device *md);
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 562b621..e71aecc 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1760,7 +1760,22 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
 			queue_work(ic->wait_wq, &dio->work);
 			return;
 		}
+		if (journal_read_pos != NOT_FOUND)
+			dio->range.n_sectors = ic->sectors_per_block;
 		wait_and_add_new_range(ic, &dio->range);
+		/*
+		 * wait_and_add_new_range drops the spinlock, so the journal
+		 * may have been changed arbitrarily. We need to recheck.
+		 * To simplify the code, we restrict I/O size to just one block.
+		 */
+		if (journal_read_pos != NOT_FOUND) {
+			sector_t next_sector;
+			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+			if (unlikely(new_pos != journal_read_pos)) {
+				remove_range_unlocked(ic, &dio->range);
+				goto retry;
+			}
+		}
 	}
 	spin_unlock_irq(&ic->endio_wait.lock);
 
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 671c243..3f694d9 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job)
 	 * no point in continuing.
 	 */
 	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
-	    job->master_job->write_err)
+	    job->master_job->write_err) {
+		job->write_err = job->master_job->write_err;
 		return -EIO;
+	}
 
 	io_job_start(job->kc->throttle);
 
@@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
 			else
 				job->read_err = 1;
 			push(&kc->complete_jobs, job);
+			wake(kc);
 			break;
 		}
 
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c44925e..b78a8a4 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3199,7 +3199,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 			  */
 			r = rs_prepare_reshape(rs);
 			if (r)
-				return r;
+				goto bad;
 
 			/* Reshaping ain't recovery, so disable recovery */
 			rs_setup_recovery(rs, MaxSector);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8..264b84e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 	}
 
 	if (unlikely(error == BLK_STS_TARGET)) {
-		if (req_op(clone) == REQ_OP_WRITE_SAME &&
-		    !clone->q->limits.max_write_same_sectors)
+		if (req_op(clone) == REQ_OP_DISCARD &&
+		    !clone->q->limits.max_discard_sectors)
+			disable_discard(tio->md);
+		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+			 !clone->q->limits.max_write_same_sectors)
 			disable_write_same(tio->md);
-		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-		    !clone->q->limits.max_write_zeroes_sectors)
+		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+			 !clone->q->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(tio->md);
 	}
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 59d17c4..96343c7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1350,7 +1350,7 @@ void dm_table_event(struct dm_table *t)
 }
 EXPORT_SYMBOL(dm_table_event);
 
-sector_t dm_table_get_size(struct dm_table *t)
+inline sector_t dm_table_get_size(struct dm_table *t)
 {
 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
 }
@@ -1375,6 +1375,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	unsigned int l, n = 0, k = 0;
 	sector_t *node;
 
+	if (unlikely(sector >= dm_table_get_size(t)))
+		return &t->targets[t->num_targets];
+
 	for (l = 0; l < t->depth; l++) {
 		n = get_child(n, k);
 		node = get_node(t, l, n);
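
The dm_table_find_target() guard returns the one-past-the-end slot for an out-of-range sector, and callers treat that as "no target", which keeps the level walk below from reading past t->highs. A compact sketch of the sentinel convention (a flat array stands in for the kernel's per-level node lookup):

#include <stdio.h>

struct target { unsigned long high; };	/* last sector each target covers */

static struct target targets[3] = { {99}, {199}, {299} };
static const unsigned int ntargets = 3;

/* Out-of-range sectors map to &targets[ntargets]; callers must compare
 * against the sentinel before dereferencing. */
static struct target *find_target(unsigned long sector)
{
	unsigned int i;

	if (sector > targets[ntargets - 1].high)
		return &targets[ntargets];

	for (i = 0; i < ntargets; i++)
		if (sector <= targets[i].high)
			return &targets[i];
	return &targets[ntargets];
}

int main(void)
{
	struct target *t = find_target(12345);

	printf("%s\n", t == &targets[ntargets] ? "out of range" : "mapped");
	return 0;
}
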
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 4cdde7a..7e8d7fc 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
 	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
 	struct bio *bio;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return ERR_PTR(-EIO);
+
 	/* Get a new block and a BIO to read it */
 	mblk = dmz_alloc_mblock(zmd, mblk_no);
 	if (!mblk)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio) {
 		dmz_free_mblock(zmd, mblk);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	spin_lock(&zmd->mblk_lock);
@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
 	if (!mblk) {
 		/* Cache miss: read the block from disk */
 		mblk = dmz_get_mblock_slow(zmd, mblk_no);
-		if (!mblk)
-			return ERR_PTR(-ENOMEM);
+		if (IS_ERR(mblk))
+			return mblk;
 	}
 
 	/* Wait for on-going read I/O and check for error */
@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 /*
  * Issue a metadata block write BIO.
  */
-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
-			     unsigned int set)
+static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+			    unsigned int set)
 {
 	sector_t block = zmd->sb[set].block + mblk->no;
 	struct bio *bio;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return -EIO;
+
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio) {
 		set_bit(DMZ_META_ERROR, &mblk->state);
-		return;
+		return -ENOMEM;
 	}
 
 	set_bit(DMZ_META_WRITING, &mblk->state);
@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
 	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
 	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
 	submit_bio(bio);
+
+	return 0;
 }
 
 /*
@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
 	struct bio *bio;
 	int ret;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return -EIO;
+
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio)
 		return -ENOMEM;
@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 {
 	struct dmz_mblock *mblk;
 	struct blk_plug plug;
-	int ret = 0;
+	int ret = 0, nr_mblks_submitted = 0;
 
 	/* Issue writes */
 	blk_start_plug(&plug);
-	list_for_each_entry(mblk, write_list, link)
-		dmz_write_mblock(zmd, mblk, set);
+	list_for_each_entry(mblk, write_list, link) {
+		ret = dmz_write_mblock(zmd, mblk, set);
+		if (ret)
+			break;
+		nr_mblks_submitted++;
+	}
 	blk_finish_plug(&plug);
 
 	/* Wait for completion */
 	list_for_each_entry(mblk, write_list, link) {
+		if (!nr_mblks_submitted)
+			break;
 		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
 			       TASK_UNINTERRUPTIBLE);
 		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 			clear_bit(DMZ_META_ERROR, &mblk->state);
 			ret = -EIO;
 		}
+		nr_mblks_submitted--;
 	}
 
 	/* Flush drive cache (this will also sync data) */
@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 	 */
 	dmz_lock_flush(zmd);
 
+	if (dmz_bdev_is_dying(zmd->dev)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	/* Get dirty blocks */
 	spin_lock(&zmd->mblk_lock);
 	list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
 	struct dm_zone *zone;
 
 	if (list_empty(&zmd->map_rnd_list))
-		return NULL;
+		return ERR_PTR(-EBUSY);
 
 	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
 		if (dmz_is_buf(zone))
@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
 			return dzone;
 	}
 
-	return NULL;
+	return ERR_PTR(-EBUSY);
 }
 
 /*
@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
 	struct dm_zone *zone;
 
 	if (list_empty(&zmd->map_seq_list))
-		return NULL;
+		return ERR_PTR(-EBUSY);
 
 	list_for_each_entry(zone, &zmd->map_seq_list, link) {
 		if (!zone->bzone)
@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
 			return zone;
 	}
 
-	return NULL;
+	return ERR_PTR(-EBUSY);
 }
 
 /*
@@ -1623,6 +1646,10 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chu
 		/* Allocate a random zone */
 		dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
 		if (!dzone) {
+			if (dmz_bdev_is_dying(zmd->dev)) {
+				dzone = ERR_PTR(-EIO);
+				goto out;
+			}
 			dmz_wait_for_free_zones(zmd);
 			goto again;
 		}
@@ -1720,6 +1747,10 @@ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
 	/* Allocate a random zone */
 	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
 	if (!bzone) {
+		if (dmz_bdev_is_dying(zmd->dev)) {
+			bzone = ERR_PTR(-EIO);
+			goto out;
+		}
 		dmz_wait_for_free_zones(zmd);
 		goto again;
 	}
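
Several of the dm-zoned paths above stop returning NULL in favour of ERR_PTR(-EIO)/ERR_PTR(-ENOMEM)/ERR_PTR(-EBUSY), so callers can tell a dying device from a plain allocation failure. The kernel encodes small negative errnos in the top page of the address range; a simplified userspace re-creation of the <linux/err.h> trio:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified copies of ERR_PTR/PTR_ERR/IS_ERR from <linux/err.h>. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *get_block(int dying)
{
	if (dying)
		return ERR_PTR(-5);	/* -EIO: fail fast, don't retry */
	return ERR_PTR(-12);		/* -ENOMEM: caller may retry */
}

int main(void)
{
	void *blk = get_block(1);

	if (IS_ERR(blk))
		printf("error %ld\n", PTR_ERR(blk));	/* prints -5 */
	return 0;
}
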
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95..9470b8f 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -37,7 +37,7 @@ enum {
 /*
  * Number of seconds of target BIO inactivity to consider the target idle.
  */
-#define DMZ_IDLE_PERIOD		(10UL * HZ)
+#define DMZ_IDLE_PERIOD			(10UL * HZ)
 
 /*
  * Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
 		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
 
 	while (block < end_block) {
+		if (dev->flags & DMZ_BDEV_DYING)
+			return -EIO;
+
 		/* Get a valid region from the source zone */
 		ret = dmz_first_valid_block(zmd, src_zone, &block);
 		if (ret <= 0)
@@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
 	dmz_unlock_flush(zmd);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
 	dmz_unlock_flush(zmd);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
 	dmz_unlock_flush(zmd);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 /*
  * Find a candidate zone for reclaim and process it.
  */
-static void dmz_reclaim(struct dmz_reclaim *zrc)
+static int dmz_do_reclaim(struct dmz_reclaim *zrc)
 {
 	struct dmz_metadata *zmd = zrc->metadata;
 	struct dm_zone *dzone;
@@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
 
 	/* Get a data zone */
 	dzone = dmz_get_zone_for_reclaim(zmd);
-	if (!dzone)
-		return;
+	if (IS_ERR(dzone))
+		return PTR_ERR(dzone);
 
 	start = jiffies;
 
@@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
 out:
 	if (ret) {
 		dmz_unlock_zone_reclaim(dzone);
-		return;
+		return ret;
 	}
 
-	(void) dmz_flush_metadata(zrc->metadata);
+	ret = dmz_flush_metadata(zrc->metadata);
+	if (ret) {
+		dmz_dev_debug(zrc->dev,
+			      "Metadata flush for zone %u failed, err %d\n",
+			      dmz_id(zmd, rzone), ret);
+		return ret;
+	}
 
 	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
 		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+	return 0;
 }
 
 /*
@@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work)
 	struct dmz_metadata *zmd = zrc->metadata;
 	unsigned int nr_rnd, nr_unmap_rnd;
 	unsigned int p_unmap_rnd;
+	int ret;
+
+	if (dmz_bdev_is_dying(zrc->dev))
+		return;
 
 	if (!dmz_should_reclaim(zrc)) {
 		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work)
 		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
 		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);
 
-	dmz_reclaim(zrc);
+	ret = dmz_do_reclaim(zrc);
+	if (ret) {
+		dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
+		if (ret == -EIO)
+			/*
+			 * LLD might be performing some error handling sequence
+			 * at the underlying device. To not interfere, do not
+			 * attempt to schedule the next reclaim run immediately.
+			 */
+			return;
+	}
 
 	dmz_schedule_reclaim(zrc);
 }
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 85fb2ba..1030c42a 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
 
 	atomic_inc(&bioctx->ref);
 	generic_make_request(clone);
+	if (clone->bi_status == BLK_STS_IOERR)
+		return -EIO;
 
 	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
 		zone->wp_block += nr_blocks;
@@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
 
 	/* Get the buffer zone. One will be allocated if needed */
 	bzone = dmz_get_chunk_buffer(zmd, zone);
-	if (!bzone)
-		return -ENOSPC;
+	if (IS_ERR(bzone))
+		return PTR_ERR(bzone);
 
 	if (dmz_is_readonly(bzone))
 		return -EROFS;
@@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
 
 	dmz_lock_metadata(zmd);
 
+	if (dmz->dev->flags & DMZ_BDEV_DYING) {
+		ret = -EIO;
+		goto out;
+	}
+
 	/*
 	 * Get the data zone mapping the chunk. There may be no
 	 * mapping for read and discard. If a mapping is obtained,
@@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work)
 
 	/* Flush dirty metadata blocks */
 	ret = dmz_flush_metadata(dmz->metadata);
+	if (ret)
+		dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
 
 	/* Process queued flush requests */
 	while (1) {
@@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work)
  * Get a chunk work and start it to process a new BIO.
  * If the BIO chunk has no work yet, create one.
  */
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 {
 	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
 	struct dm_chunk_work *cw;
+	int ret = 0;
 
 	mutex_lock(&dmz->chunk_lock);
 
 	/* Get the BIO chunk work. If one is not active yet, create one */
 	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
 	if (!cw) {
-		int ret;
 
 		/* Create a new chunk work */
 		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
-		if (!cw)
+		if (unlikely(!cw)) {
+			ret = -ENOMEM;
 			goto out;
+		}
 
 		INIT_WORK(&cw->work, dmz_chunk_work);
 		atomic_set(&cw->refcount, 0);
@@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
 		if (unlikely(ret)) {
 			kfree(cw);
-			cw = NULL;
 			goto out;
 		}
 	}
@@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 	bio_list_add(&cw->bio_list, bio);
 	dmz_get_chunk_work(cw);
 
+	dmz_reclaim_bio_acc(dmz->reclaim);
 	if (queue_work(dmz->chunk_wq, &cw->work))
 		dmz_get_chunk_work(cw);
 out:
 	mutex_unlock(&dmz->chunk_lock);
+	return ret;
+}
+
+/*
+ * Check the backing device availability. If it's on the way out,
+ * start failing I/O. Reclaim and metadata components also call this
+ * function to cleanly abort operation in the event of such failure.
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
+{
+	struct gendisk *disk;
+
+	if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
+		disk = dmz_dev->bdev->bd_disk;
+		if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+			dmz_dev_warn(dmz_dev, "Backing device queue dying");
+			dmz_dev->flags |= DMZ_BDEV_DYING;
+		} else if (disk->fops->check_events) {
+			if (disk->fops->check_events(disk, 0) &
+					DISK_EVENT_MEDIA_CHANGE) {
+				dmz_dev_warn(dmz_dev, "Backing device offline");
+				dmz_dev->flags |= DMZ_BDEV_DYING;
+			}
+		}
+	}
+
+	return dmz_dev->flags & DMZ_BDEV_DYING;
 }
 
 /*
@@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 	sector_t sector = bio->bi_iter.bi_sector;
 	unsigned int nr_sectors = bio_sectors(bio);
 	sector_t chunk_sector;
+	int ret;
+
+	if (dmz_bdev_is_dying(dmz->dev))
+		return DM_MAPIO_KILL;
 
 	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
 		      bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
 
 	/* Now ready to handle this BIO */
-	dmz_reclaim_bio_acc(dmz->reclaim);
-	dmz_queue_chunk_work(dmz, bio);
+	ret = dmz_queue_chunk_work(dmz, bio);
+	if (ret) {
+		dmz_dev_debug(dmz->dev,
+			      "BIO op %d, can't process chunk %llu, err %i\n",
+			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+			      ret);
+		return DM_MAPIO_REQUEUE;
+	}
 
 	return DM_MAPIO_SUBMITTED;
 }
@@ -856,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
 {
 	struct dmz_target *dmz = ti->private;
 
+	if (dmz_bdev_is_dying(dmz->dev))
+		return -ENODEV;
+
 	*bdev = dmz->dev->bdev;
 
 	return 0;
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49..93a6452 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -56,6 +56,8 @@ struct dmz_dev {
 
 	unsigned int		nr_zones;
 
+	unsigned int		flags;
+
 	sector_t		zone_nr_sectors;
 	unsigned int		zone_nr_sectors_shift;
 
@@ -67,6 +69,9 @@ struct dmz_dev {
 				 (dev)->zone_nr_sectors_shift)
 #define dmz_chunk_block(dev, b)	((b) & ((dev)->zone_nr_blocks - 1))
 
+/* Device flags. */
+#define DMZ_BDEV_DYING		(1 << 0)
+
 /*
  * Zone descriptor.
  */
@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
 void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
 void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
 
+/*
+ * Functions defined in dm-zoned-target.c
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+
 #endif /* DM_ZONED_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 42768fe..c9860e3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
 	}
 }
 
+void disable_discard(struct mapped_device *md)
+{
+	struct queue_limits *limits = dm_get_queue_limits(md);
+
+	/* device doesn't really support DISCARD, disable it */
+	limits->max_discard_sectors = 0;
+	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
 	struct queue_limits *limits = dm_get_queue_limits(md);
@@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
 	dm_endio_fn endio = tio->ti->type->end_io;
 
 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-		    !bio->bi_disk->queue->limits.max_write_same_sectors)
+		if (bio_op(bio) == REQ_OP_DISCARD &&
+		    !bio->bi_disk->queue->limits.max_discard_sectors)
+			disable_discard(md);
+		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+			 !bio->bi_disk->queue->limits.max_write_same_sectors)
 			disable_write_same(md);
-		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(md);
 	}
 
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b3197..8aae062 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
 
 	new_parent = shadow_current(s);
 
+	pn = dm_block_data(new_parent);
+	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+		sizeof(__le64) : s->info->value_type.size;
+
+	/* create & init the left block */
 	r = new_block(s->info, &left);
 	if (r < 0)
 		return r;
 
+	ln = dm_block_data(left);
+	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+
+	ln->header.flags = pn->header.flags;
+	ln->header.nr_entries = cpu_to_le32(nr_left);
+	ln->header.max_entries = pn->header.max_entries;
+	ln->header.value_size = pn->header.value_size;
+	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+
+	/* create & init the right block */
 	r = new_block(s->info, &right);
 	if (r < 0) {
 		unlock_block(s->info, left);
 		return r;
 	}
 
-	pn = dm_block_data(new_parent);
-	ln = dm_block_data(left);
 	rn = dm_block_data(right);
-
-	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
 	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
 
-	ln->header.flags = pn->header.flags;
-	ln->header.nr_entries = cpu_to_le32(nr_left);
-	ln->header.max_entries = pn->header.max_entries;
-	ln->header.value_size = pn->header.value_size;
-
 	rn->header.flags = pn->header.flags;
 	rn->header.nr_entries = cpu_to_le32(nr_right);
 	rn->header.max_entries = pn->header.max_entries;
 	rn->header.value_size = pn->header.value_size;
-
-	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
 	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
-
-	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
-		sizeof(__le64) : s->info->value_type.size;
-	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
 	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
 	       nr_right * size);
 
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec44924..2532858 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
 	}
 
 	if (smm->recursion_count == 1)
-		apply_bops(smm);
+		r = apply_bops(smm);
 
 	smm->recursion_count--;
 
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index 24b6296..9b94868 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -505,7 +505,7 @@ static int __dsp_send_hfi_queue(struct iris_hfi_device *device)
 		device->dsp_iface_q_table.mem_data.size);
 	rc = cvp_dsp_send_cmd_hfi_queue(
 		(phys_addr_t *)device->dsp_iface_q_table.mem_data.dma_handle,
-		device->dsp_iface_q_table.mem_data.size);
+		device->dsp_iface_q_table.mem_data.size, device);
 	if (rc) {
 		dprintk(CVP_ERR, "%s: dsp hfi queue init failed\n", __func__);
 		return rc;
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index 393ff3c..00f2d7c 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -480,10 +480,18 @@ static int hfi_process_session_cvp_msg(u32 device_id,
 		if (pkt->packet_type == HFI_MSG_SESSION_CVP_DFS
 			|| pkt->packet_type == HFI_MSG_SESSION_CVP_DME
 			|| pkt->packet_type == HFI_MSG_SESSION_CVP_ICA
-			|| pkt->packet_type == HFI_MSG_SESSION_CVP_FD)
+			|| pkt->packet_type == HFI_MSG_SESSION_CVP_FD) {
+			u64 ktid;
+			u32 kdata1, kdata2;
+
+			kdata1 = pkt->client_data.kdata1;
+			kdata2 = pkt->client_data.kdata2;
+			ktid = ((u64)kdata2 << 32) | kdata1;
+			msm_cvp_unmap_buf_cpu(inst, ktid);
+
 			return _deprecated_hfi_msg_process(device_id,
 				pkt, info, inst);
-
+		}
 		dprintk(CVP_ERR, "Invalid deprecate_bitmask %#x\n",
 					inst->deprecate_bitmask);
 	}
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index 44aa779..8cb781d 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -369,6 +369,75 @@ static void __msm_cvp_cache_operations(struct msm_cvp_internal_buffer *cbuf)
 				cbuf->buf.offset, cbuf->buf.size);
 }
 
+static int msm_cvp_map_buf_user_persist(struct msm_cvp_inst *inst,
+					struct cvp_buf_type *in_buf,
+					u32 *iova)
+{
+	int rc = 0;
+	struct cvp_internal_buf *cbuf;
+	struct dma_buf *dma_buf;
+
+	if (!inst || !iova) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (in_buf->fd > 0) {
+		dma_buf = msm_cvp_smem_get_dma_buf(in_buf->fd);
+		if (!dma_buf) {
+			dprintk(CVP_ERR, "%s: Invalid fd=%d", __func__,
+				in_buf->fd);
+			return -EINVAL;
+		}
+		in_buf->dbuf = dma_buf;
+		msm_cvp_smem_put_dma_buf(dma_buf);
+	}
+
+	rc = msm_cvp_session_get_iova_addr(inst, in_buf, iova);
+	if (!rc && *iova != 0)
+		return 0;
+	cbuf = kzalloc(sizeof(*cbuf), GFP_KERNEL);
+	if (!cbuf)
+		return -ENOMEM;
+
+	cbuf->smem.buffer_type = in_buf->flags;
+	cbuf->smem.fd = in_buf->fd;
+	cbuf->smem.size = in_buf->size;
+	cbuf->smem.flags = 0;
+	cbuf->smem.offset = 0;
+	cbuf->smem.dma_buf = in_buf->dbuf;
+	cbuf->buffer_ownership = CLIENT;
+
+	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
+	if (rc) {
+		dprintk(CVP_ERR,
+		"%s: %x : fd %d %s size %d",
+		"map persist failed", hash32_ptr(inst->session), cbuf->smem.fd,
+		cbuf->smem.dma_buf->name, cbuf->smem.size);
+		goto exit;
+	}
+
+	/* Assign mapped dma_buf back because it could be zero previously */
+	in_buf->dbuf = cbuf->smem.dma_buf;
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_add_tail(&cbuf->list, &inst->persistbufs.list);
+	mutex_unlock(&inst->persistbufs.lock);
+
+	*iova = cbuf->smem.device_addr;
+
+	dprintk(CVP_DBG,
+	"%s: %x : fd %d %s size %d", "map persist", hash32_ptr(inst->session),
+	cbuf->smem.fd, cbuf->smem.dma_buf->name, cbuf->smem.size);
+	return rc;
+
+exit:
+	kfree(cbuf);
+	cbuf = NULL;
+
+	return rc;
+}
+
 static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 				struct cvp_buf_type *in_buf,
 				u32 *iova,
@@ -625,6 +694,56 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
 	return rc;
 }
 
+static int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *in_pkt,
+	unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_buf_desc *buf_ptr;
+	struct cvp_buf_type *new_buf;
+	int i, rc = 0;
+	unsigned int iova;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	for (i = 0; i < buf_num; i++) {
+		buf_ptr = (struct cvp_buf_desc *)
+				&in_pkt->pkt_data[offset];
+
+		offset += sizeof(*new_buf) >> 2;
+		new_buf = (struct cvp_buf_type *)buf_ptr;
+
+		/*
+		 * Make sure fd or dma_buf field doesn't have any
+		 * garbage value.
+		 */
+		if (inst->session_type == MSM_CVP_USER) {
+			new_buf->dbuf = 0;
+		} else if (inst->session_type == MSM_CVP_KERNEL) {
+			new_buf->fd = -1;
+		} else if (inst->session_type >= MSM_CVP_UNKNOWN) {
+			dprintk(CVP_ERR,
+				"%s: unknown session type %d\n",
+				__func__, inst->session_type);
+			return -EINVAL;
+		}
+
+		if (new_buf->fd <= 0 && !new_buf->dbuf)
+			continue;
+
+		rc = msm_cvp_map_buf_user_persist(inst, new_buf, &iova);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: buf %d register failed.\n",
+				__func__, i);
+
+			return rc;
+		}
+		new_buf->fd = iova;
+	}
+	return rc;
+}
+
 static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
 	struct cvp_kmd_hfi_packet *in_pkt,
 	unsigned int offset, unsigned int buf_num)
@@ -818,7 +937,11 @@ static int msm_cvp_session_process_hfi(
 		buf_num = in_buf_num;
 	}
 
-	rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
+	if (in_pkt->pkt_data[1] == HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS)
+		rc = msm_cvp_map_user_persist(inst, in_pkt, offset, buf_num);
+	else
+		rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
+
 	if (rc)
 		goto exit;
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index ef3726a..2fb9850 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -1657,6 +1657,7 @@ static int allocate_and_set_internal_bufs(struct msm_cvp_inst *inst,
 	}
 
 	binfo->buffer_type = HFI_BUFFER_INTERNAL_PERSIST_1;
+	binfo->buffer_ownership = DRIVER;
 
 	rc = set_internal_buf_on_fw(inst, &binfo->smem, false);
 	if (rc)
@@ -1713,6 +1714,7 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
 	int rc = 0;
 	struct msm_cvp_core *core;
 	struct cvp_hfi_device *hdev;
+	int all_released;
 
 	if (!inst) {
 		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
@@ -1731,6 +1733,8 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
 	}
 
 	dprintk(CVP_DBG, "release persist buffer!\n");
+	all_released = 0;
+
 	mutex_lock(&inst->persistbufs.lock);
 	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
 		buf = list_entry(ptr, struct cvp_internal_buf, list);
@@ -1740,36 +1744,49 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
 			mutex_unlock(&inst->persistbufs.lock);
 			return -EINVAL;
 		}
-		if (inst->state > MSM_CVP_CLOSE_DONE) {
-			list_del(&buf->list);
-			msm_cvp_smem_free(handle);
-			kfree(buf);
-			continue;
-		}
-		buffer_info.buffer_size = handle->size;
-		buffer_info.buffer_type = buf->buffer_type;
-		buffer_info.num_buffers = 1;
-		buffer_info.align_device_addr = handle->device_addr;
-		buffer_info.response_required = true;
-		rc = call_hfi_op(hdev, session_release_buffers,
-				(void *)inst->session, &buffer_info);
-		if (!rc) {
-			mutex_unlock(&inst->persistbufs.lock);
-			rc = wait_for_sess_signal_receipt(inst,
+
+		/* Workaround for FW: release buffer means release all */
+		if (inst->state <= MSM_CVP_CLOSE_DONE && !all_released) {
+			buffer_info.buffer_size = handle->size;
+			buffer_info.buffer_type = buf->buffer_type;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr = handle->device_addr;
+			buffer_info.response_required = true;
+			rc = call_hfi_op(hdev, session_release_buffers,
+					(void *)inst->session, &buffer_info);
+			if (!rc) {
+				mutex_unlock(&inst->persistbufs.lock);
+				rc = wait_for_sess_signal_receipt(inst,
 					HAL_SESSION_RELEASE_BUFFER_DONE);
-			if (rc)
-				dprintk(CVP_WARN,
+				if (rc)
+					dprintk(CVP_WARN,
 					"%s: wait for signal failed, rc %d\n",
-						__func__, rc);
-			mutex_lock(&inst->persistbufs.lock);
-		} else {
-			dprintk(CVP_WARN,
-					"Rel prst buf fail:%x, %d\n",
-					buffer_info.align_device_addr,
-					buffer_info.buffer_size);
+					__func__, rc);
+				mutex_lock(&inst->persistbufs.lock);
+			} else {
+				dprintk(CVP_WARN,
+						"Rel prst buf fail:%x, %d\n",
+						buffer_info.align_device_addr,
+						buffer_info.buffer_size);
+			}
+			all_released = 1;
 		}
 		list_del(&buf->list);
-		msm_cvp_smem_free(handle);
+
+		if (buf->buffer_ownership == DRIVER) {
+			dprintk(CVP_DBG,
+			"%s: %x : fd %d %s size %d",
+			"free arp", hash32_ptr(inst->session), buf->smem.fd,
+			buf->smem.dma_buf->name, buf->smem.size);
+			msm_cvp_smem_free(handle);
+		} else if (buf->buffer_ownership == CLIENT) {
+			dprintk(CVP_DBG,
+			"%s: %x : fd %d %s size %d",
+			"unmap persist", hash32_ptr(inst->session),
+			buf->smem.fd, buf->smem.dma_buf->name, buf->smem.size);
+			msm_cvp_smem_unmap_dma_buf(inst, &buf->smem);
+		}
+
 		kfree(buf);
 	}
 	mutex_unlock(&inst->persistbufs.lock);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.h b/drivers/media/platform/msm/cvp/msm_cvp_common.h
index aac667d..99dd3fd 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.h
@@ -45,4 +45,5 @@ int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
 int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
 void print_client_buffer(u32 tag, const char *str,
 		struct msm_cvp_inst *inst, struct cvp_kmd_buffer *cbuf);
+void msm_cvp_unmap_buf_cpu(struct msm_cvp_inst *inst, u64 ktid);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index 365f0a8..cde1eaa 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -213,8 +213,9 @@ static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core)
 
 	/* Instance count includes current instance as well. */
 
-	if ((instance_count > core->resources.max_inst_count) ||
-		(secure_instance_count > core->resources.max_secure_inst_count))
+	if ((instance_count >= core->resources.max_inst_count) ||
+		(secure_instance_count >=
+			core->resources.max_secure_inst_count))
 		overload = true;
 	return overload;
 }
@@ -273,6 +274,19 @@ void *msm_cvp_open(int core_id, int session_type)
 		goto err_invalid_core;
 	}
 
+	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
+	if (msm_cvp_check_for_inst_overload(core)) {
+		dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
+		mutex_lock(&core->lock);
+		list_for_each_entry(inst, &core->instances, list)
+			dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
+				inst, inst->cur_cmd_type,
+				hash32_ptr(inst->session));
+		mutex_unlock(&core->lock);
+
+		return NULL;
+	}
+
 	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
 	if (!inst) {
 		dprintk(CVP_ERR, "Failed to allocate memory\n");
@@ -331,19 +345,6 @@ void *msm_cvp_open(int core_id, int session_type)
 		goto fail_init;
 	}
 
-	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
-	if (msm_cvp_check_for_inst_overload(core)) {
-		dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
-		mutex_lock(&core->lock);
-		list_for_each_entry(inst, &core->instances, list)
-			dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
-				inst, inst->cur_cmd_type,
-				hash32_ptr(inst->session));
-		mutex_unlock(&core->lock);
-
-		goto fail_init;
-	}
-
 	inst->debugfs_root =
 		msm_cvp_debugfs_init_inst(inst, core->debugfs_root);
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
index 8218282..4b6d92e 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
@@ -63,6 +63,8 @@ struct cvp_dsp_apps {
 	struct completion dereg_buffer_work;
 	struct completion shutdown_work;
 	struct completion cmdqueue_send_work;
+	struct work_struct ssr_work;
+	struct iris_hfi_device *device;
 };
 
 
@@ -88,6 +90,40 @@ static int cvp_dsp_send_cmd(void *msg, uint32_t len)
 	return err;
 }
 
+void msm_cvp_cdsp_ssr_handler(struct work_struct *work)
+{
+	struct cvp_dsp_apps *me;
+	uint64_t msg_ptr;
+	uint32_t msg_ptr_len;
+	int err;
+
+	me = container_of(work, struct cvp_dsp_apps, ssr_work);
+	if (!me) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+
+	msg_ptr = cmd_msg.msg_ptr;
+	msg_ptr_len =  cmd_msg.msg_ptr_len;
+
+	err = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)msg_ptr,
+					msg_ptr_len,
+					(void *)NULL);
+	if (err) {
+		dprintk(CVP_ERR,
+			"%s: Failed to send HFI Queue address. err=%d\n",
+			__func__, err);
+		return;
+	}
+
+	if (me->device) {
+		mutex_lock(&me->device->lock);
+		me->device->dsp_flags |= DSP_INIT;
+		mutex_unlock(&me->device->lock);
+	}
+	dprintk(CVP_DBG, "%s: dsp recovered from SSR successfully\n", __func__);
+}
+
 static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
 {
 	int err = 0;
@@ -124,14 +160,7 @@ static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
 				__func__, err);
 			return err;
 		}
-		err = cvp_dsp_send_cmd_hfi_queue(
-			(phys_addr_t *)msg_ptr, msg_ptr_len);
-		if (err) {
-			dprintk(CVP_ERR,
-				"%s: Failed to send HFI Queue address. err=%d\n",
-				__func__, err);
-			goto bail;
-		}
+		schedule_work(&me->ssr_work);
 		mutex_lock(&me->smd_mutex);
 		cdsp_state = me->cdsp_state;
 		mutex_unlock(&me->smd_mutex);
@@ -148,9 +177,15 @@ static void cvp_dsp_rpmsg_remove(struct rpmsg_device *rpdev)
 {
 	struct cvp_dsp_apps *me = &gfa_cv;
 
+	cancel_work_sync(&me->ssr_work);
 	mutex_lock(&me->smd_mutex);
 	me->chan = NULL;
 	me->cdsp_state = STATUS_SSR;
+	if (me->device) {
+		mutex_lock(&me->device->lock);
+		me->device->dsp_flags &= ~DSP_INIT;
+		mutex_unlock(&me->device->lock);
+	}
 	mutex_unlock(&me->smd_mutex);
 	dprintk(CVP_INFO,
 		"%s: CDSP SSR triggered\n", __func__);
@@ -193,7 +228,8 @@ static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
 }
 
 int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
-	uint32_t size_in_bytes)
+				uint32_t size_in_bytes,
+				struct iris_hfi_device *device)
 {
 	int err, timeout;
 	struct msm_cvp_core *core;
@@ -219,6 +255,7 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
 	mutex_lock(&me->smd_mutex);
 	cmd_msg.msg_ptr = (uint64_t)phys_addr;
 	cmd_msg.msg_ptr_len = (size_in_bytes);
+	me->device = device;
 	mutex_unlock(&me->smd_mutex);
 
 	dprintk(CVP_DBG,
@@ -489,6 +526,7 @@ static int __init cvp_dsp_device_init(void)
 	init_completion(&me->cmdqueue_send_work);
 	me->cvp_shutdown = STATUS_INIT;
 	me->cdsp_state = STATUS_INIT;
+	INIT_WORK(&me->ssr_work, msm_cvp_cdsp_ssr_handler);
 	err = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
 	if (err) {
 		dprintk(CVP_ERR,
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
index 0fb3567..ee7a847 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
@@ -8,6 +8,7 @@
 
 #include <linux/types.h>
 #include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
 
 #define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
 #define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
@@ -23,7 +24,7 @@
  * Size in bytes of command message queue
  */
 int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
-	uint32_t size_in_bytes);
+	uint32_t size_in_bytes, struct iris_hfi_device *device);
 
 /*
  * API for CVP driver to suspend CVP session during
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
index 3226e9b..d08e998 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
@@ -77,7 +77,7 @@ static int mpq_sw_dmx_write_to_decoder(struct dvb_demux_feed *feed,
 }
 
 static int mpq_sw_dmx_set_source(struct dmx_demux *demux,
-		const dmx_source_t *src)
+		const enum dmx_source_t *src)
 {
 	int ret = -EINVAL;
 
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index 6bea45e..bafab3f 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -45,6 +45,7 @@
 #define NPU_MAX_DT_NAME_LEN	    21
 #define NPU_MAX_PWRLEVELS		8
 #define NPU_MAX_STATS_BUF_SIZE 16384
+#define NPU_MAX_BW_DEVS			4
 
 enum npu_power_level {
 	NPU_PWRLEVEL_MINSVS = 0,
@@ -174,7 +175,9 @@ struct npu_pwrctrl {
 	uint32_t min_pwrlevel;
 	uint32_t num_pwrlevels;
 
-	struct device *devbw;
+	struct device *devbw[NPU_MAX_BW_DEVS];
+	uint32_t devbw_num;
+	uint32_t bwmon_enabled;
 	uint32_t uc_pwrlevel;
 	uint32_t cdsprm_pwrlevel;
 	uint32_t fmax_pwrlevel;
diff --git a/drivers/media/platform/msm/npu/npu_dbg.c b/drivers/media/platform/msm/npu/npu_dbg.c
index d81ffa1..988d177 100644
--- a/drivers/media/platform/msm/npu/npu_dbg.c
+++ b/drivers/media/platform/msm/npu/npu_dbg.c
@@ -17,18 +17,6 @@
  * Function Definitions - Debug
  * -------------------------------------------------------------------------
  */
-static void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
-{
-	uint32_t reg_val;
-
-	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
-	NPU_INFO("fw jobs execute started count = %d\n", reg_val);
-	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
-	NPU_INFO("fw jobs execute finished count = %d\n", reg_val);
-	reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
-	NPU_INFO("fw jobs aco parser debug = %d\n", reg_val);
-}
-
 void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr)
 {
 	int32_t *ptr = (int32_t *)cmd_ptr;
@@ -50,7 +38,7 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
 		target_que * sizeof(struct hfi_queue_header);
 	int32_t *ptr = (int32_t *)&queue;
 	size_t content_off;
-	uint32_t *content;
+	uint32_t *content, content_size;
 	int i;
 
 	MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
@@ -58,21 +46,42 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
 
 	NPU_ERR("DUMP IPC queue %d:\n", target_que);
 	NPU_ERR("Header size %d:\n", HFI_QUEUE_HEADER_SIZE);
-	NPU_ERR("Content size %d:\n", queue.qhdr_q_size);
 	NPU_ERR("============QUEUE HEADER=============\n");
 	for (i = 0; i < HFI_QUEUE_HEADER_SIZE/4; i++)
 		NPU_ERR("%x\n", ptr[i]);
 
-	content_off = (size_t)IPC_ADDR + queue.qhdr_start_offset;
-	content = kzalloc(queue.qhdr_q_size, GFP_KERNEL);
+	content_off = (size_t)(IPC_ADDR + queue.qhdr_start_offset +
+		queue.qhdr_read_idx);
+	if (queue.qhdr_write_idx >= queue.qhdr_read_idx)
+		content_size = queue.qhdr_write_idx - queue.qhdr_read_idx;
+	else
+		content_size = queue.qhdr_q_size - queue.qhdr_read_idx +
+			queue.qhdr_write_idx;
+
+	NPU_ERR("Content size %d:\n", content_size);
+	if (content_size == 0)
+		return;
+
+	content = kzalloc(content_size, GFP_KERNEL);
 	if (!content) {
 		NPU_ERR("failed to allocate IPC queue content buffer\n");
 		return;
 	}
 
-	MEMR(npu_dev, (void *)content_off, content, queue.qhdr_q_size);
+	if (queue.qhdr_write_idx >= queue.qhdr_read_idx) {
+		MEMR(npu_dev, (void *)content_off, content, content_size);
+	} else {
+		MEMR(npu_dev, (void *)content_off, content,
+			queue.qhdr_q_size - queue.qhdr_read_idx);
+
+		MEMR(npu_dev, (void *)((size_t)IPC_ADDR +
+			queue.qhdr_start_offset),
+			(void *)((size_t)content + queue.qhdr_q_size -
+			queue.qhdr_read_idx), queue.qhdr_write_idx);
+	}
+
 	NPU_ERR("============QUEUE CONTENT=============\n");
-	for (i = 0; i < queue.qhdr_q_size/4; i++)
+	for (i = 0; i < content_size/4; i++)
 		NPU_ERR("%x\n", content[i]);
 
 	NPU_ERR("DUMP IPC queue %d END\n", target_que);
@@ -110,7 +119,6 @@ void npu_dump_debug_info(struct npu_device *npu_dev)
 		return;
 	}
 
-	npu_dump_debug_timeout_stats(npu_dev);
 	npu_dump_dbg_registers(npu_dev);
 	npu_dump_all_ipc_queue(npu_dev);
 }
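
The rewritten dump copies only the live region between qhdr_read_idx and qhdr_write_idx, splitting the copy in two when the data wraps past the end of the ring; note the second MEMR() above copies exactly qhdr_write_idx bytes, which is the wrapped head chunk. The same index arithmetic as a self-contained userspace sketch (illustrative names):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bytes pending in a ring of q_size bytes. */
static uint32_t ring_content_size(uint32_t rd, uint32_t wr, uint32_t q_size)
{
	return (wr >= rd) ? wr - rd : q_size - rd + wr;
}

/* Copy the pending bytes out, in at most two chunks. */
static void ring_copy_out(const uint8_t *ring, uint32_t rd, uint32_t wr,
			  uint32_t q_size, uint8_t *out)
{
	if (wr >= rd) {
		memcpy(out, ring + rd, wr - rd);
	} else {
		memcpy(out, ring + rd, q_size - rd);		/* tail */
		memcpy(out + (q_size - rd), ring, wr);		/* wrapped head */
	}
}

int main(void)
{
	uint8_t ring[8] = "ABCDEFGH", out[9] = { 0 };
	uint32_t rd = 6, wr = 3;	/* wrapped: pending bytes are GH + ABC */
	uint32_t n = ring_content_size(rd, wr, 8);

	ring_copy_out(ring, rd, wr, 8, out);
	printf("%u bytes: %s\n", n, out);	/* prints: 5 bytes: GHABC */
	return 0;
}
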
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index 8653f38..4c46f77 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -25,6 +25,7 @@
  */
 static int npu_debug_open(struct inode *inode, struct file *file);
 static int npu_debug_release(struct inode *inode, struct file *file);
+static int npu_debug_reg_release(struct inode *inode, struct file *file);
 static ssize_t npu_debug_reg_write(struct file *file,
 		const char __user *user_buf, size_t count, loff_t *ppos);
 static ssize_t npu_debug_reg_read(struct file *file,
@@ -46,7 +47,7 @@ struct npu_device *g_npu_dev;
 
 static const struct file_operations npu_reg_fops = {
 	.open = npu_debug_open,
-	.release = npu_debug_release,
+	.release = npu_debug_reg_release,
 	.read = npu_debug_reg_read,
 	.write = npu_debug_reg_write,
 };
@@ -86,6 +87,11 @@ static int npu_debug_open(struct inode *inode, struct file *file)
 
 static int npu_debug_release(struct inode *inode, struct file *file)
 {
+	return 0;
+}
+
+static int npu_debug_reg_release(struct inode *inode, struct file *file)
+{
 	struct npu_device *npu_dev = file->private_data;
 	struct npu_debugfs_ctx *debugfs;
 
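
Splitting the release handler matters because several debugfs nodes share npu_debug_open(), but only the reg node lazily allocates a dump buffer in its read handler; freeing it from the common release would touch state the other nodes never own. The shape of the split, with hypothetical names:

#include <linux/fs.h>
#include <linux/slab.h>

struct my_ctx {
	char *reg_buf;		/* allocated lazily by the reg read handler */
};

static int common_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static int plain_release(struct inode *inode, struct file *file)
{
	return 0;		/* these nodes allocate nothing per open */
}

static int reg_release(struct inode *inode, struct file *file)
{
	struct my_ctx *ctx = file->private_data;

	kfree(ctx->reg_buf);
	ctx->reg_buf = NULL;
	return 0;
}

static const struct file_operations plain_fops = {
	.open = common_open,
	.release = plain_release,
};

static const struct file_operations reg_fops = {
	.open = common_open,
	.release = reg_release,	/* only the reg node frees the dump buffer */
};
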
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index b54bdeb..a7851bd 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -16,6 +16,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/thermal.h>
 #include <linux/soc/qcom/llcc-qcom.h>
+#include <soc/qcom/devfreq_devbw.h>
 
 #include "npu_common.h"
 #include "npu_hw.h"
@@ -60,6 +61,8 @@ static ssize_t perf_mode_override_store(struct device *dev,
 static ssize_t boot_store(struct device *dev,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count);
+static void npu_suspend_devbw(struct npu_device *npu_dev);
+static void npu_resume_devbw(struct npu_device *npu_dev);
 static bool npu_is_post_clock(const char *clk_name);
 static bool npu_is_exclude_rate_clock(const char *clk_name);
 static int npu_get_max_state(struct thermal_cooling_device *cdev,
@@ -335,26 +338,30 @@ int npu_enable_core_power(struct npu_device *npu_dev)
 	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 	int ret = 0;
 
+	mutex_lock(&npu_dev->dev_lock);
+	NPU_DBG("Enable core power %d\n", pwr->pwr_vote_num);
 	if (!pwr->pwr_vote_num) {
 		ret = npu_enable_regulators(npu_dev);
 		if (ret)
-			return ret;
+			goto fail;
 
 		ret = npu_set_bw(npu_dev, 100, 100);
 		if (ret) {
 			npu_disable_regulators(npu_dev);
-			return ret;
+			goto fail;
 		}
 
 		ret = npu_enable_core_clocks(npu_dev);
 		if (ret) {
 			npu_set_bw(npu_dev, 0, 0);
 			npu_disable_regulators(npu_dev);
-			pwr->pwr_vote_num = 0;
-			return ret;
+			goto fail;
 		}
+		npu_resume_devbw(npu_dev);
 	}
 	pwr->pwr_vote_num++;
+fail:
+	mutex_unlock(&npu_dev->dev_lock);
 
 	return ret;
 }
@@ -363,10 +370,16 @@ void npu_disable_core_power(struct npu_device *npu_dev)
 {
 	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 
-	if (!pwr->pwr_vote_num)
+	mutex_lock(&npu_dev->dev_lock);
+	NPU_DBG("Disable core power %d\n", pwr->pwr_vote_num);
+	if (!pwr->pwr_vote_num) {
+		mutex_unlock(&npu_dev->dev_lock);
 		return;
+	}
+
 	pwr->pwr_vote_num--;
 	if (!pwr->pwr_vote_num) {
+		npu_suspend_devbw(npu_dev);
 		npu_disable_core_clocks(npu_dev);
 		npu_set_bw(npu_dev, 0, 0);
 		npu_disable_regulators(npu_dev);
@@ -376,6 +389,7 @@ void npu_disable_core_power(struct npu_device *npu_dev)
 		NPU_DBG("setting back to power level=%d\n",
 			pwr->active_pwrlevel);
 	}
+	mutex_unlock(&npu_dev->dev_lock);
 }
 
 static int npu_enable_core_clocks(struct npu_device *npu_dev)
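
npu_enable_core_power()/npu_disable_core_power() form a voting scheme: the first voter powers the block up, the last voter powers it down, and the new dev_lock serializes the whole sequence so two callers cannot both observe pwr_vote_num == 0 and double-enable. Note the failure path jumps past the increment, so a failed enable takes no vote. A compact userspace model of the invariant (pthreads standing in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int vote_num;

static int hw_on(void)   { puts("power on");  return 0; }
static void hw_off(void) { puts("power off"); }

static int power_enable(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!vote_num)
		ret = hw_on();	/* only the first voter touches hardware */
	if (!ret)
		vote_num++;	/* a failed enable takes no vote */
	pthread_mutex_unlock(&lock);
	return ret;
}

static void power_disable(void)
{
	pthread_mutex_lock(&lock);
	if (vote_num && !--vote_num)
		hw_off();	/* only the last voter powers down */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	power_enable();
	power_enable();		/* no second "power on" */
	power_disable();
	power_disable();	/* prints "power off" */
	power_disable();	/* unbalanced call is a guarded no-op */
	return 0;
}
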
@@ -559,6 +573,42 @@ int npu_set_uc_power_level(struct npu_device *npu_dev,
 }
 
 /* -------------------------------------------------------------------------
+ * Bandwidth Monitor Related
+ * -------------------------------------------------------------------------
+ */
+static void npu_suspend_devbw(struct npu_device *npu_dev)
+{
+	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+	int ret, i;
+
+	if (pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
+		for (i = 0; i < pwr->devbw_num; i++) {
+			ret = devfreq_suspend_devbw(pwr->devbw[i]);
+			if (ret)
+				NPU_ERR("devfreq_suspend_devbw failed rc:%d\n",
+					ret);
+		}
+		pwr->bwmon_enabled = 0;
+	}
+}
+
+static void npu_resume_devbw(struct npu_device *npu_dev)
+{
+	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+	int ret, i;
+
+	if (!pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
+		for (i = 0; i < pwr->devbw_num; i++) {
+			ret = devfreq_resume_devbw(pwr->devbw[i]);
+			if (ret)
+				NPU_ERR("devfreq_resume_devbw failed rc:%d\n",
+					ret);
+		}
+		pwr->bwmon_enabled = 1;
+	}
+}
+
+/* -------------------------------------------------------------------------
  * Clocks Related
  * -------------------------------------------------------------------------
  */
@@ -625,10 +675,7 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 				continue;
 		}
 
-		NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);
-
 		if (core_clks[i].reset) {
-			NPU_DBG("Deassert %s\n", core_clks[i].clk_name);
 			rc = reset_control_deassert(core_clks[i].reset);
 			if (rc)
 				NPU_WARN("deassert %s reset failed\n",
@@ -645,9 +692,6 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 		if (npu_is_exclude_rate_clock(core_clks[i].clk_name))
 			continue;
 
-		NPU_DBG("setting rate of clock %s to %ld\n",
-			core_clks[i].clk_name, pwrlevel->clk_freq[i]);
-
 		rc = clk_set_rate(core_clks[i].clk,
 			pwrlevel->clk_freq[i]);
 		/* not fatal error, keep using previous clk rate */
@@ -668,11 +712,9 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 				if (npu_is_post_clock(core_clks[i].clk_name))
 					continue;
 			}
-			NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 			clk_disable_unprepare(core_clks[i].clk);
 
 			if (core_clks[i].reset) {
-				NPU_DBG("Assert %s\n", core_clks[i].clk_name);
 				rc = reset_control_assert(core_clks[i].reset);
 				if (rc)
 					NPU_WARN("assert %s reset failed\n",
@@ -700,9 +742,6 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 
 		/* set clock rate to 0 before disabling it */
 		if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
-			NPU_DBG("setting rate of clock %s to 0\n",
-				core_clks[i].clk_name);
-
 			rc = clk_set_rate(core_clks[i].clk, 0);
 			if (rc) {
 				NPU_ERR("clk_set_rate %s to 0 failed\n",
@@ -710,11 +749,9 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 			}
 		}
 
-		NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 		clk_disable_unprepare(core_clks[i].clk);
 
 		if (core_clks[i].reset) {
-			NPU_DBG("Assert %s\n", core_clks[i].clk_name);
 			rc = reset_control_assert(core_clks[i].reset);
 			if (rc)
 				NPU_WARN("assert %s reset failed\n",
@@ -788,11 +825,15 @@ static int npu_enable_regulators(struct npu_device *npu_dev)
 					regulators[i].regulator_name);
 				break;
 			}
-			NPU_DBG("regulator %s enabled\n",
-				regulators[i].regulator_name);
 		}
 	}
-	host_ctx->power_vote_num++;
+
+	if (rc) {
+		for (i--; i >= 0; i--)
+			regulator_disable(regulators[i].regulator);
+	} else {
+		host_ctx->power_vote_num++;
+	}
 	return rc;
 }
 
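
The fix in npu_enable_regulators() is the partial-failure unwind: when enabling regulator i fails, the for (i--; i >= 0; i--) loop disables only the regulators that were actually enabled, in reverse order, so neither hardware state nor the vote count is left half-applied. The pattern, reduced to stubs:

#include <stdio.h>

#define NUM_REGS 4

static int enable_reg(int i)
{
	if (i == 2)			/* simulate failure on the third one */
		return -1;
	printf("enabled %d\n", i);
	return 0;
}

static void disable_reg(int i)
{
	printf("disabled %d\n", i);
}

static int enable_all(void)
{
	int rc = 0, i;

	for (i = 0; i < NUM_REGS; i++) {
		rc = enable_reg(i);
		if (rc)
			break;
	}
	if (rc)
		for (i--; i >= 0; i--)	/* unwind what was enabled, reversed */
			disable_reg(i);
	return rc;
}

int main(void)
{
	return enable_all() ? 1 : 0;	/* enabled 0,1 then disabled 1,0 */
}
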
@@ -803,11 +844,9 @@ static void npu_disable_regulators(struct npu_device *npu_dev)
 	struct npu_regulator *regulators = npu_dev->regulators;
 
 	if (host_ctx->power_vote_num > 0) {
-		for (i = 0; i < npu_dev->regulator_num; i++) {
+		for (i = 0; i < npu_dev->regulator_num; i++)
 			regulator_disable(regulators[i].regulator);
-			NPU_DBG("regulator %s disabled\n",
-				regulators[i].regulator_name);
-		}
+
 		host_ctx->power_vote_num--;
 	}
 }
@@ -839,13 +878,12 @@ int npu_enable_irq(struct npu_device *npu_dev)
 	reg_val |= RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE;
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_ENABLE(0),
 		reg_val);
-	for (i = 0; i < NPU_MAX_IRQ; i++) {
-		if (npu_dev->irq[i].irq != 0) {
+	for (i = 0; i < NPU_MAX_IRQ; i++)
+		if (npu_dev->irq[i].irq != 0)
 			enable_irq(npu_dev->irq[i].irq);
-			NPU_DBG("enable irq %d\n", npu_dev->irq[i].irq);
-		}
-	}
+
 	npu_dev->irq_enabled = true;
+	NPU_DBG("irq enabled\n");
 
 	return 0;
 }
@@ -860,12 +898,9 @@ void npu_disable_irq(struct npu_device *npu_dev)
 		return;
 	}
 
-	for (i = 0; i < NPU_MAX_IRQ; i++) {
-		if (npu_dev->irq[i].irq != 0) {
+	for (i = 0; i < NPU_MAX_IRQ; i++)
+		if (npu_dev->irq[i].irq != 0)
 			disable_irq(npu_dev->irq[i].irq);
-			NPU_DBG("disable irq %d\n", npu_dev->irq[i].irq);
-		}
-	}
 
 	reg_val = npu_cc_reg_read(npu_dev,
 		NPU_CC_NPU_MASTERn_GENERAL_IRQ_OWNER(0));
@@ -880,6 +915,7 @@ void npu_disable_irq(struct npu_device *npu_dev)
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_CLEAR(0),
 		RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE);
 	npu_dev->irq_enabled = false;
+	NPU_DBG("irq disabled\n");
 }
 
 /* -------------------------------------------------------------------------
@@ -925,12 +961,13 @@ int npu_enable_sys_cache(struct npu_device *npu_dev)
 		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
 		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);
 
-		NPU_DBG("prior to activate sys cache\n");
 		rc = llcc_slice_activate(npu_dev->sys_cache);
-		if (rc)
+		if (rc) {
 			NPU_ERR("failed to activate sys cache\n");
-		else
-			NPU_DBG("sys cache activated\n");
+			llcc_slice_putd(npu_dev->sys_cache);
+			npu_dev->sys_cache = NULL;
+			rc = 0;
+		}
 	}
 
 	return rc;
@@ -1584,8 +1621,6 @@ int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
 	bwctrl->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
 	bwctrl->bw_levels[i].vectors[1].ab = new_ab / bwctrl->num_paths * MBYTE;
 
-	NPU_INFO("BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
-
 	ret = msm_bus_scale_client_update_request(bwctrl->bus_client, i);
 	if (ret) {
 		NPU_ERR("bandwidth request failed (%d)\n", ret);
@@ -1716,7 +1751,9 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev)
 {
 	struct platform_device *pdev = npu_dev->pdev;
 	struct device_node *node;
-	int ret = 0;
+	int ret = 0, i;
+	struct platform_device *p2dev;
+	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 
 	/* Power levels */
 	node = of_find_node_by_name(pdev->dev.of_node, "qcom,npu-pwrlevels");
@@ -1730,6 +1767,47 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev)
 	if (ret)
 		return ret;
 
+	/* Parse Bandwidth Monitor */
+	pwr->devbw_num = of_property_count_strings(pdev->dev.of_node,
+			"qcom,npubw-dev-names");
+	if (pwr->devbw_num <= 0) {
+		NPU_INFO("npubw-dev-names are not defined\n");
+		return 0;
+	} else if (pwr->devbw_num > NPU_MAX_BW_DEVS) {
+		NPU_ERR("number of devbw %d exceeds limit\n", pwr->devbw_num);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < pwr->devbw_num; i++) {
+		node = of_parse_phandle(pdev->dev.of_node,
+				"qcom,npubw-devs", i);
+
+		if (node) {
+			p2dev = of_find_device_by_node(node);
+			of_node_put(node);
+			if (p2dev) {
+				pwr->devbw[i] = &p2dev->dev;
+			} else {
+				NPU_ERR("can't find devbw%d\n", i);
+				ret = -EINVAL;
+				break;
+			}
+		} else {
+			NPU_ERR("can't find devbw node\n");
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret) {
+		/* Allow npu work without bwmon */
+		pwr->devbw_num = 0;
+		ret = 0;
+	} else {
+		/* Set to 1 initially - we assume bwmon is on */
+		pwr->bwmon_enabled = 1;
+	}
+
 	return ret;
 }
 
@@ -2035,6 +2113,7 @@ static int npu_probe(struct platform_device *pdev)
 		return -EFAULT;
 
 	npu_dev->pdev = pdev;
+	mutex_init(&npu_dev->dev_lock);
 
 	platform_set_drvdata(pdev, npu_dev);
 	res = platform_get_resource_byname(pdev,
@@ -2224,8 +2303,6 @@ static int npu_probe(struct platform_device *pdev)
 	if (rc)
 		goto error_driver_init;
 
-	mutex_init(&npu_dev->dev_lock);
-
 	rc = npu_host_init(npu_dev);
 	if (rc) {
 		NPU_ERR("unable to init host\n");
diff --git a/drivers/media/platform/msm/npu/npu_firmware.h b/drivers/media/platform/msm/npu/npu_firmware.h
index 8c0385d..3d8537b 100644
--- a/drivers/media/platform/msm/npu/npu_firmware.h
+++ b/drivers/media/platform/msm/npu/npu_firmware.h
@@ -29,11 +29,6 @@
 /* Data value for debug */
 #define REG_NPU_FW_DEBUG_DATA       NPU_GPR13
 
-/* Started job count */
-#define REG_FW_JOB_CNT_START        NPU_GPR14
-/* Finished job count */
-#define REG_FW_JOB_CNT_END          NPU_GPR15
-
 /* NPU FW Control/Status Register */
 /* bit fields definitions in CTRL STATUS REG */
 #define FW_CTRL_STATUS_IPC_READY_BIT            0
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index 85e8187..62feb8c 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -166,9 +166,7 @@ static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev,
 			status = INTERRUPT_RAISE_NPU(npu_dev);
 	}
 
-	if (status == 0)
-		NPU_DBG("Cmd Msg put on Command Queue - SUCCESSS\n");
-	else
+	if (status)
 		NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n");
 
 	return status;
@@ -238,6 +236,13 @@ static int ipc_queue_read(struct npu_device *npu_dev,
 		status = -EPERM;
 		goto exit;
 	}
+
+	if (packet_size > NPU_IPC_BUF_LENGTH) {
+		NPU_ERR("Invalid packet size %d\n", packet_size);
+		status = -EINVAL;
+		goto exit;
+	}
+
 	new_read_idx = queue.qhdr_read_idx + packet_size;
 
 	if (new_read_idx < (queue.qhdr_q_size)) {
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index cf449af..b738a8b 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -99,7 +99,6 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 	}
 
 	/* Keep reading ctrl status until NPU is ready */
-	NPU_DBG("waiting for status ready from fw\n");
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
 		ret = -EPERM;
@@ -194,7 +193,6 @@ int load_fw(struct npu_device *npu_dev)
 int unload_fw(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
-	int ret = 0;
 
 	if (host_ctx->auto_pil_disable) {
 		NPU_WARN("auto pil is disabled\n");
@@ -212,16 +210,7 @@ int unload_fw(struct npu_device *npu_dev)
 		return -EBUSY;
 	}
 
-	/* vote minimum bandwidth before unload npu fw via PIL */
-	ret = npu_set_bw(npu_dev, 100, 100);
-	if (ret) {
-		NPU_ERR("Can't update bandwidth\n");
-		mutex_unlock(&host_ctx->lock);
-		return ret;
-	}
-
 	subsystem_put_local(host_ctx->subsystem_handle);
-	npu_set_bw(npu_dev, 0, 0);
 	host_ctx->fw_state = FW_UNLOADED;
 	NPU_DBG("fw is unloaded\n");
 	mutex_unlock(&host_ctx->lock);
@@ -531,9 +520,18 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code,
 			npu_disable_core_power(npu_dev);
 			npu_notify_aop(npu_dev, false);
 		}
+
+		/* vote minimum bandwidth before unloading npu fw via PIL */
+		ret = npu_set_bw(npu_dev, 100, 100);
+		if (ret)
+			NPU_WARN("Can't update bandwidth\n");
+
 		break;
 	}
 	case SUBSYS_AFTER_SHUTDOWN:
+		ret = npu_set_bw(npu_dev, 0, 0);
+		if (ret)
+			NPU_WARN("Can't update bandwidth\n");
 		break;
 	default:
 		NPU_DBG("Ignoring event\n");
@@ -592,12 +590,14 @@ int npu_host_init(struct npu_device *npu_dev)
 	if (IS_ERR(host_ctx->notif_hdle)) {
 		NPU_ERR("register event notification failed\n");
 		sts = PTR_ERR(host_ctx->notif_hdle);
-		return sts;
+		host_ctx->notif_hdle = NULL;
+		goto fail;
 	}
 
 	host_ctx->wq = create_workqueue("npu_irq_hdl");
 	if (!host_ctx->wq) {
 		sts = -EPERM;
+		goto fail;
 	} else {
 		INIT_WORK(&host_ctx->ipc_irq_work, npu_ipc_irq_work);
 		INIT_WORK(&host_ctx->wdg_err_irq_work, npu_wdg_err_irq_work);
@@ -608,16 +608,33 @@ int npu_host_init(struct npu_device *npu_dev)
 			npu_disable_fw_work);
 	}
 
+	host_ctx->ipc_msg_buf = kzalloc(NPU_IPC_BUF_LENGTH, GFP_KERNEL);
+	if (!host_ctx->ipc_msg_buf) {
+		NPU_ERR("Failed to allocate ipc buffer\n");
+		sts = -ENOMEM;
+		goto fail;
+	}
+
 	host_ctx->auto_pil_disable = false;
 
 	return sts;
+fail:
+	if (host_ctx->wq)
+		destroy_workqueue(host_ctx->wq);
+	if (host_ctx->notif_hdle)
+		subsys_notif_unregister_notifier(host_ctx->notif_hdle,
+			&host_ctx->nb);
+	mutex_destroy(&host_ctx->lock);
+	return sts;
 }
 
 void npu_host_deinit(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
+	kfree(host_ctx->ipc_msg_buf);
 	destroy_workqueue(host_ctx->wq);
+	subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
 	mutex_destroy(&host_ctx->lock);
 }
 
@@ -630,7 +647,6 @@ irqreturn_t npu_ipc_intr_hdlr(int irq, void *ptr)
 	struct npu_device *npu_dev = (struct npu_device *)ptr;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	NPU_DBG("NPU ipc irq %d\n", irq);
 	INTERRUPT_ACK(npu_dev, irq);
 
 	/* Check that the event thread currently is running */
@@ -646,23 +662,17 @@ irqreturn_t npu_general_intr_hdlr(int irq, void *ptr)
 	struct npu_device *npu_dev = (struct npu_device *)ptr;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	NPU_DBG("NPU general irq %d\n", irq);
-
 	reg_val = npu_cc_reg_read(npu_dev,
 		NPU_CC_NPU_MASTERn_GENERAL_IRQ_STATUS(0));
 	NPU_DBG("GENERAL_IRQ_STATUS %x\n", reg_val);
 	reg_val &= (RSC_SHUTDOWN_REQ_IRQ_STATUS | RSC_BRINGUP_REQ_IRQ_STATUS);
 	ack_val = npu_cc_reg_read(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL);
 
-	if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS) {
-		NPU_DBG("Send SHUTDOWN ACK\n");
+	if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS)
 		ack_val |= Q6SS_RSC_SHUTDOWN_ACK_EN;
-	}
 
-	if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS) {
-		NPU_DBG("Send BRINGUP ACK\n");
+	if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS)
 		ack_val |= Q6SS_RSC_BRINGUP_ACK_EN;
-	}
 
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, ack_val);
 	npu_cc_reg_write(npu_dev,
@@ -774,7 +784,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	}
 
 	/* Keep reading ctrl status until NPU is ready */
-	NPU_DBG("waiting for status ready from fw\n");
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
 		NPU_ERR("wait for fw status ready timedout\n");
@@ -1181,14 +1190,6 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 		NPU_DBG("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
 			exe_rsp_pkt->header.status);
 		NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
-		NPU_DBG("e2e_IPC_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.e2e_ipc_tick_count);
-		NPU_DBG("aco_load_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.aco_load_tick_count);
-		NPU_DBG("aco_execute_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.aco_execution_tick_count);
-		NPU_DBG("total_num_layers: %d\n",
-			exe_rsp_pkt->stats.exe_stats.total_num_layers);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			exe_rsp_pkt->network_hdl);
@@ -1510,13 +1511,14 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
 		NPU_ERR("Another cmd is pending\n");
 		ret = -EBUSY;
 	} else {
-		NPU_DBG("Send cmd %d network id %lld\n",
-			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
-			network->id);
 		network->cmd_async = async;
 		network->cmd_ret_status = 0;
 		network->cmd_pending = true;
 		network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
+		reinit_completion(&network->cmd_done);
+		NPU_DBG("Send cmd %d network id %llx trans id %d\n",
+			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
+			network->id, network->trans_id);
 		ret = npu_host_ipc_send_cmd(npu_dev,
 			IPC_QUEUE_APPS_EXEC, cmd_ptr);
 		if (ret)
@@ -1592,7 +1594,7 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
 			network++;
 		}
 	}
-	pr_debug("max perf mode for networks: %d\n", max_perf_mode);
+	NPU_DBG("max perf mode for networks: %d\n", max_perf_mode);
 
 	return max_perf_mode;
 }
@@ -1665,8 +1667,6 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 		goto error_free_network;
 	}
 
-	NPU_DBG("network address %llx\n", network->phy_add);
-
 	ret = set_perf_mode(npu_dev);
 	if (ret) {
 		NPU_ERR("set_perf_mode failed\n");
@@ -1686,11 +1686,9 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	load_packet->buf_pkt.num_layers = network->num_layers;
 	load_packet->num_patch_params = num_patch_params;
 
-	/* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */
-	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
 	if (ret) {
-		NPU_DBG("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
 		goto error_free_network;
 	}
 
@@ -1703,19 +1701,20 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 
 	mutex_lock(&host_ctx->lock);
 
-	if (!ret) {
-		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out\n");
-		npu_dump_debug_info(npu_dev);
-		ret = -ETIMEDOUT;
-		goto error_load_network;
-	}
-
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during load_v2 network\n");
 		goto error_free_network;
 	}
 
+	if (!ret) {
+		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
+			network->id, network->trans_id);
+		npu_dump_debug_info(npu_dev);
+		ret = -ETIMEDOUT;
+		goto error_load_network;
+	}
+
 	ret = network->cmd_ret_status;
 	if (ret)
 		goto error_free_network;
@@ -1789,8 +1788,6 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	unload_packet.header.flags = 0;
 	unload_packet.network_hdl = (uint32_t)network->network_hdl;
 
-	/* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */
-	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false,
 		false);
 
@@ -1818,21 +1815,23 @@ int32_t npu_host_unload_network(struct npu_client *client,
 
 	mutex_lock(&host_ctx->lock);
 
+	if (network->fw_error) {
+		ret = -EIO;
+		NPU_ERR("fw is in error state during unload network\n");
+		goto free_network;
+	}
+
 	if (!ret) {
-		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out\n");
+		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
+			network->id, network->trans_id);
 		npu_dump_debug_info(npu_dev);
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_network;
 	}
 
-	if (network->fw_error) {
-		ret = -EIO;
-		NPU_ERR("fw is in error state during unload network\n");
-	} else {
-		ret = network->cmd_ret_status;
-		NPU_DBG("unload network status %d\n", ret);
-	}
+	ret = network->cmd_ret_status;
+	NPU_DBG("unload network status %d\n", ret);
 
 free_network:
 	/*
@@ -1930,8 +1929,6 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
 		exec_packet->header.flags, exec_ioctl->stats_buf_size);
 
-	/* Send it on the high priority queue */
-	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl,
 		false);
 
@@ -1953,21 +1950,21 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
 	mutex_lock(&host_ctx->lock);
-	if (!ret) {
-		NPU_ERR("npu: %x NPU_IPC_CMD_EXECUTE_V2 time out\n",
-			network->id);
-		npu_dump_debug_info(npu_dev);
-		network->cmd_pending = false;
-		ret = -ETIMEDOUT;
-		goto free_exec_packet;
-	}
-
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during execute_v2 network\n");
 		goto free_exec_packet;
 	}
 
+	if (!ret) {
+		NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
+			network->id, network->trans_id);
+		npu_dump_debug_info(npu_dev);
+		network->cmd_pending = false;
+		ret = -ETIMEDOUT;
+		goto free_exec_packet;
+	}
+
 	ret = network->cmd_ret_status;
 	if (!ret) {
 		exec_ioctl->stats_buf_size = network->stats_buf_size;
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 6c14720..72976cb 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -26,7 +26,7 @@
 #define NPU_MBOX_IDLE_TIMEOUT msecs_to_jiffies(NPU_MBOX_IDLE_TIMEOUT_MS)
 #define FIRMWARE_VERSION 0x00001000
 #define MAX_LOADED_NETWORK 32
-#define NPU_IPC_BUF_LENGTH 512
+#define NPU_IPC_BUF_LENGTH 4096
 
 #define FW_DBG_MODE_PAUSE        (1 << 0)
 #define FW_DBG_MODE_INC_TIMEOUT  (1 << 1)
@@ -105,6 +105,7 @@ struct npu_host_ctx {
 	void *notif_hdle;
 	spinlock_t bridge_mbox_lock;
 	bool bridge_mbox_pwr_on;
+	void *ipc_msg_buf;
 };
 
 struct npu_device;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index cd8db2c..fc57b6b 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -310,6 +310,7 @@ struct qseecom_control {
 	bool smcinvoke_support;
 	uint64_t qseecom_bridge_handle;
 	uint64_t ta_bridge_handle;
+	uint64_t user_contig_bridge_handle;
 
 	struct list_head  unregister_lsnr_pending_list_head;
 	wait_queue_head_t register_lsnr_pending_wq;
@@ -1778,20 +1779,17 @@ static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
 	else
 		qclk = &qseecom.ce_drv;
 
-	if (qclk->clk_access_cnt > 2) {
+	if (qclk->clk_access_cnt > 0) {
+		qclk->clk_access_cnt--;
+	} else {
 		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
 		ret = -EINVAL;
-		goto err_dec_ref_cnt;
 	}
-	if (qclk->clk_access_cnt == 2)
-		qclk->clk_access_cnt--;
 
-err_dec_ref_cnt:
 	mutex_unlock(&clk_access_lock);
 	return ret;
 }
 
-
 static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
 {
 	int32_t ret = 0;
@@ -7750,6 +7748,13 @@ static long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("app loaded query req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
 		data->type = QSEECOM_CLIENT_APP;
 		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
@@ -9243,14 +9248,13 @@ static int qseecom_register_heap_shmbridge(uint32_t heapid, uint64_t *handle)
 	struct device_node *ion_node, *node;
 	struct platform_device *ion_pdev = NULL;
 	struct cma *cma = NULL;
-	int ret = -1;
 	uint32_t ns_vmids[] = {VMID_HLOS};
 	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
 
 	ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
 	if (!ion_node) {
 		pr_err("Failed to get qcom,msm-ion node\n");
-		return ret;
+		return -ENODEV;
 	}
 
 	for_each_available_child_of_node(ion_node, node) {
@@ -9259,23 +9263,22 @@ static int qseecom_register_heap_shmbridge(uint32_t heapid, uint64_t *handle)
 		ion_pdev = of_find_device_by_node(node);
 		if (!ion_pdev) {
 			pr_err("Failed to find node for heap %d\n", heapid);
-			break;
+			return -ENODEV;
 		}
 		cma = dev_get_cma_area(&ion_pdev->dev);
-		if (cma) {
-			heap_pa = cma_get_base(cma);
-			heap_size = (size_t)cma_get_size(cma);
-		} else {
+		if (!cma) {
 			pr_err("Failed to get Heap %d info\n", heapid);
+			return -ENODEV;
 		}
-		break;
-	}
-
-	if (heap_pa)
-		ret = qtee_shmbridge_register(heap_pa,
+		heap_pa = cma_get_base(cma);
+		heap_size = (size_t)cma_get_size(cma);
+		return qtee_shmbridge_register(heap_pa,
 				heap_size, ns_vmids, ns_vm_perms, 1,
 				PERM_READ | PERM_WRITE, handle);
-	return ret;
+	}
+
+	pr_warn("Could not get heap %d info: No shmbridge created\n", heapid);
+	return 0;
 }
 
 static int qseecom_register_shmbridge(void)
@@ -9283,22 +9286,31 @@ static int qseecom_register_shmbridge(void)
 	if (qseecom_register_heap_shmbridge(ION_QSECOM_TA_HEAP_ID,
 					&qseecom.ta_bridge_handle)) {
 		pr_err("Failed to register shmbridge for ta heap\n");
-		return -ENOMEM;
+		return -EINVAL;
 	}
 
 	if (qseecom_register_heap_shmbridge(ION_QSECOM_HEAP_ID,
 					&qseecom.qseecom_bridge_handle)) {
 		pr_err("Failed to register shmbridge for qseecom heap\n");
 		qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
-		return -ENOMEM;
+		return -EINVAL;
+	}
+
+	if (qseecom_register_heap_shmbridge(ION_USER_CONTIG_HEAP_ID,
+					&qseecom.user_contig_bridge_handle)) {
+		pr_err("Failed to register shmbridge for user contig heap\n");
+		qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+		qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+		return -EINVAL;
 	}
 	return 0;
 }
 
 static void qseecom_deregister_shmbridge(void)
 {
-	qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+	qtee_shmbridge_deregister(qseecom.user_contig_bridge_handle);
 	qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+	qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
 }
 
 static int qseecom_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index ed5cefb..89deb45 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 {
 	data->bytes_xfered = data->blocks * data->blksz;
 	data->error = 0;
+	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
 	return 1;
 }
 
@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
 		mmc->max_segs = 1;
 
 	/* DMA size field can address up to 8 MB */
-	mmc->max_seg_size = 8 * 1024 * 1024;
+	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
+				  dma_get_max_seg_size(host->dev));
 	mmc->max_req_size = mmc->max_seg_size;
 	/* External DMA is in 512 byte blocks */
 	mmc->max_blk_size = 512;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 7fdac27..9c77bfe 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
 
 	ret = mmc_of_parse(host->mmc);
 	if (ret) {
-		dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
 		goto unreg_clk;
 	}
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index be0b785..0d2392c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1102,6 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
 done:
 	bond_dev->vlan_features = vlan_features;
 	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_STAG_TX |
 				    NETIF_F_GSO_UDP_L4;
 	bond_dev->gso_max_segs = gso_max_segs;
 	netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -2188,6 +2190,15 @@ static void bond_miimon_commit(struct bonding *bond)
 	bond_for_each_slave(bond, slave, iter) {
 		switch (slave->new_link) {
 		case BOND_LINK_NOCHANGE:
+			/* For 802.3ad mode, check current slave speed and
+			 * duplex again in case its port was disabled after
+			 * invalid speed/duplex reporting but recovered before
+			 * link monitoring could make a decision on the actual
+			 * link status
+			 */
+			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+			    slave->link == BOND_LINK_UP)
+				bond_3ad_adapter_speed_duplex_changed(slave);
 			continue;
 
 		case BOND_LINK_UP:
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c05e4d5..bd127ce 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -1260,6 +1260,8 @@ int register_candev(struct net_device *dev)
 		return -EINVAL;
 
 	dev->rtnl_link_ops = &can_link_ops;
+	netif_carrier_off(dev);
+
 	return register_netdev(dev);
 }
 EXPORT_SYMBOL_GPL(register_candev);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 602c19e2..786d852 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
 
 	/* All packets processed */
 	if (num_pkts < quota) {
-		napi_complete_done(napi, num_pkts);
-		/* Enable Rx FIFO interrupts */
-		rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
-				   RCANFD_RFCC_RFIE);
+		if (napi_complete_done(napi, num_pkts)) {
+			/* Enable Rx FIFO interrupts */
+			rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+					   RCANFD_RFCC_RFIE);
+		}
 	}
 	return num_pkts;
 }
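
Guarding the interrupt unmask on the return value of napi_complete_done() closes a race: the call returns false when the NAPI instance was rescheduled in the meantime, and unmasking Rx FIFO interrupts in that window could fire an IRQ while polling is still considered in progress. A generic poll skeleton with the guard (hedged sketch; rx_clean() and rx_irq_unmask() are placeholders):

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = rx_clean(napi, budget);	/* placeholder Rx processing */

	if (done < budget) {
		/* Unmask only if NAPI really completed, i.e. nobody
		 * rescheduled this instance behind our back.
		 */
		if (napi_complete_done(napi, done))
			rx_irq_unmask();
	}
	return done;
}
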
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index b8c39ed..179bfcd 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
 		if (!netdev)
 			continue;
 
-		strncpy(name, netdev->name, IFNAMSIZ);
+		strlcpy(name, netdev->name, IFNAMSIZ);
 
 		unregister_sja1000dev(netdev);
 
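
The strncpy() -> strlcpy() swaps here and in the peak_usb/qed hunks below are NUL-termination fixes: strncpy() leaves the destination unterminated when the source is at least IFNAMSIZ bytes long, and the saved name is printed after the netdev is unregistered. A userspace demonstration (a local strlcpy equivalent is used, since strlcpy is a BSD extension only recently added to glibc):

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy equivalent: always NUL-terminates when size > 0. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len < size - 1) ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[4], b[4];

	memset(a, 'X', sizeof(a));
	strncpy(a, "can0if", sizeof(a));	/* all 4 bytes used, no NUL */
	my_strlcpy(b, "can0if", sizeof(b));	/* "can" + NUL */

	printf("strlcpy: \"%s\"\n", b);
	/* printf("%s", a) here would read past the end of the buffer */
	return 0;
}
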
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index da64e71..fccb6bf 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -678,17 +678,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
 		return regulator_disable(reg);
 }
 
-static void mcp251x_open_clean(struct net_device *net)
-{
-	struct mcp251x_priv *priv = netdev_priv(net);
-	struct spi_device *spi = priv->spi;
-
-	free_irq(spi->irq, priv);
-	mcp251x_hw_sleep(spi);
-	mcp251x_power_enable(priv->transceiver, 0);
-	close_candev(net);
-}
-
 static int mcp251x_stop(struct net_device *net)
 {
 	struct mcp251x_priv *priv = netdev_priv(net);
@@ -954,37 +943,43 @@ static int mcp251x_open(struct net_device *net)
 				   flags | IRQF_ONESHOT, DEVICE_NAME, priv);
 	if (ret) {
 		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
-		mcp251x_power_enable(priv->transceiver, 0);
-		close_candev(net);
-		goto open_unlock;
+		goto out_close;
 	}
 
 	priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
 				   0);
+	if (!priv->wq) {
+		ret = -ENOMEM;
+		goto out_clean;
+	}
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
 	ret = mcp251x_hw_reset(spi);
-	if (ret) {
-		mcp251x_open_clean(net);
-		goto open_unlock;
-	}
+	if (ret)
+		goto out_free_wq;
 	ret = mcp251x_setup(net, spi);
-	if (ret) {
-		mcp251x_open_clean(net);
-		goto open_unlock;
-	}
+	if (ret)
+		goto out_free_wq;
 	ret = mcp251x_set_normal_mode(spi);
-	if (ret) {
-		mcp251x_open_clean(net);
-		goto open_unlock;
-	}
+	if (ret)
+		goto out_free_wq;
 
 	can_led_event(net, CAN_LED_EVENT_OPEN);
 
 	netif_wake_queue(net);
+	mutex_unlock(&priv->mcp_lock);
 
-open_unlock:
+	return 0;
+
+out_free_wq:
+	destroy_workqueue(priv->wq);
+out_clean:
+	free_irq(spi->irq, priv);
+	mcp251x_hw_sleep(spi);
+out_close:
+	mcp251x_power_enable(priv->transceiver, 0);
+	close_candev(net);
 	mutex_unlock(&priv->mcp_lock);
 	return ret;
 }
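
The open path now uses an ordered goto ladder instead of the removed mcp251x_open_clean() helper: each failure point unwinds exactly what was acquired before it, and the new alloc_workqueue() NULL check no longer leaks the IRQ it sits between. The shape of the ladder, reduced to stubs (self-contained, illustrative):

#include <stdio.h>

static int get_irq(void)  { puts("got irq");  return 0; }
static int get_wq(void)   { puts("got wq");   return 0; }
static int start_hw(void) { return -1; }	/* simulate hw setup failure */
static void put_wq(void)  { puts("destroy wq"); }
static void put_irq(void) { puts("free irq"); }
static void put_dev(void) { puts("close dev"); }

static int open_dev(void)
{
	int ret;

	ret = get_irq();
	if (ret)
		goto out_close;
	ret = get_wq();
	if (ret)
		goto out_free_irq;
	ret = start_hw();
	if (ret)
		goto out_free_wq;
	return 0;

out_free_wq:		/* unwind strictly in reverse acquisition order */
	put_wq();
out_free_irq:
	put_irq();
out_close:
	put_dev();
	return ret;
}

int main(void)
{
	return open_dev() ? 1 : 0;	/* destroy wq, free irq, close dev */
}
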
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 611f9d3..43b0fa2b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -576,16 +576,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
 	dev->state &= ~PCAN_USB_STATE_STARTED;
 	netif_stop_queue(netdev);
 
+	close_candev(netdev);
+
+	dev->can.state = CAN_STATE_STOPPED;
+
 	/* unlink all pending urbs and free used memory */
 	peak_usb_unlink_all_urbs(dev);
 
 	if (dev->adapter->dev_stop)
 		dev->adapter->dev_stop(dev);
 
-	close_candev(netdev);
-
-	dev->can.state = CAN_STATE_STOPPED;
-
 	/* can set bus off now */
 	if (dev->adapter->dev_set_bus) {
 		int err = dev->adapter->dev_set_bus(dev, 0);
@@ -863,7 +863,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
 
 		dev_prev_siblings = dev->prev_siblings;
 		dev->state &= ~PCAN_USB_STATE_CONNECTED;
-		strncpy(name, netdev->name, IFNAMSIZ);
+		strlcpy(name, netdev->name, IFNAMSIZ);
 
 		unregister_netdev(netdev);
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index dd161c5..4198835 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -849,7 +849,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
 			goto err_out;
 
 		/* allocate command buffer once for all for the interface */
-		pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+		pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
 						GFP_KERNEL);
 		if (!pdev->cmd_buffer_addr)
 			goto err_out_1;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index d516def..b304198 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -502,7 +502,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
 	u8 *buffer;
 	int err;
 
-	buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+	buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 33baa17..cf01e73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3058,12 +3058,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	/* if VF indicate to PF this function is going down (PF will delete sp
 	 * elements and clear initializations
 	 */
-	if (IS_VF(bp))
+	if (IS_VF(bp)) {
+		bnx2x_clear_vlan_info(bp);
 		bnx2x_vfpf_close_vf(bp);
-	else if (unload_mode != UNLOAD_RECOVERY)
+	} else if (unload_mode != UNLOAD_RECOVERY) {
 		/* if this is a normal/close unload need to clean up chip*/
 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
-	else {
+	} else {
 		/* Send the UNLOAD_REQUEST to the MCP */
 		bnx2x_send_unload_req(bp, unload_mode);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e508e5..ee5159e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
 /**
  * bnx2x_sp_event - handle ramrods completion.
  *
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2c9af0f..68c62e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
 	return rc;
 }
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+
+	/* Mark that hw forgot all entries */
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		vlan->hw = false;
+
+	bp->vlan_cnt = 0;
+}
+
 static int bnx2x_del_all_vlans(struct bnx2x *bp)
 {
 	struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
 	unsigned long ramrod_flags = 0, vlan_flags = 0;
-	struct bnx2x_vlan_entry *vlan;
 	int rc;
 
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
 	if (rc)
 		return rc;
 
-	/* Mark that hw forgot all entries */
-	list_for_each_entry(vlan, &bp->vlan_reg, link)
-		vlan->hw = false;
-	bp->vlan_cnt = 0;
+	bnx2x_clear_vlan_info(bp);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index c34ea38..6be6de0 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!adapter->regs) {
 		dev_err(&pdev->dev, "cannot map device registers\n");
 		err = -ENOMEM;
-		goto out_free_adapter;
+		goto out_free_adapter_nofail;
 	}
 
 	adapter->pdev = pdev;
@@ -3398,6 +3398,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		if (adapter->port[i])
 			free_netdev(adapter->port[i]);
 
+out_free_adapter_nofail:
+	kfree_skb(adapter->nofail_skb);
+
 out_free_adapter:
 	kfree(adapter);
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index f2aba5b..d45c435 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
 {
 	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
-	spin_lock_init(&new->lock);
+	if (new)
+		spin_lock_init(&new->lock);
 	return new;
 }
 
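
allocate_flower_entry() could dereference NULL before this change: kzalloc() may fail, and spin_lock_init() ran unconditionally on the result. The guard is the whole fix; the same shape in plain C:

#include <stdlib.h>

struct entry {
	int lock;		/* stands in for the spinlock */
};

static struct entry *alloc_entry(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (e)			/* initialize only on successful allocation */
		e->lock = 0;
	return e;		/* caller must still check for NULL */
}

int main(void)
{
	struct entry *e = alloc_entry();

	free(e);
	return 0;
}
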
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 6127697..a91d49d 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -157,6 +157,7 @@ struct hip04_priv {
 	unsigned int reg_inten;
 
 	struct napi_struct napi;
+	struct device *dev;
 	struct net_device *ndev;
 
 	struct tx_desc *tx_desc;
@@ -185,7 +186,7 @@ struct hip04_priv {
 
 static inline unsigned int tx_count(unsigned int head, unsigned int tail)
 {
-	return (head - tail) % (TX_DESC_NUM - 1);
+	return (head - tail) % TX_DESC_NUM;
 }
 
 static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
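
tx_count() computes ring occupancy, and with head and tail advancing modulo TX_DESC_NUM the correct form is (head - tail) % TX_DESC_NUM; the old % (TX_DESC_NUM - 1) under-reports once the difference reaches TX_DESC_NUM - 1, so a nearly full ring could be treated as empty. A quick check of both formulas with a small ring:

#include <stdio.h>

#define TX_DESC_NUM 4u

int main(void)
{
	/* head is 3 descriptors ahead of tail: the ring is nearly full */
	unsigned int head = 3, tail = 0;

	printf("old: %u\n", (head - tail) % (TX_DESC_NUM - 1));	/* 0: "empty" */
	printf("new: %u\n", (head - tail) % TX_DESC_NUM);	/* 3: correct */
	return 0;
}

With a power-of-two ring size, the unsigned subtraction also stays correct once head wraps around past tail.
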
@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
 		}
 
 		if (priv->tx_phys[tx_tail]) {
-			dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+			dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
 					 priv->tx_skb[tx_tail]->len,
 					 DMA_TO_DEVICE);
 			priv->tx_phys[tx_tail] = 0;
@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_BUSY;
 	}
 
-	phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, phys)) {
+	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, phys)) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 	u16 len;
 	u32 err;
 
+	/* clean up tx descriptors */
+	tx_remaining = hip04_tx_reclaim(ndev, false);
+
 	while (cnt && !last) {
 		buf = priv->rx_buf[priv->rx_head];
 		skb = build_skb(buf, priv->rx_buf_size);
@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 			goto refill;
 		}
 
-		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+		dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
 				 RX_BUF_SIZE, DMA_FROM_DEVICE);
 		priv->rx_phys[priv->rx_head] = 0;
 
@@ -534,9 +538,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 		buf = netdev_alloc_frag(priv->rx_buf_size);
 		if (!buf)
 			goto done;
-		phys = dma_map_single(&ndev->dev, buf,
+		phys = dma_map_single(priv->dev, buf,
 				      RX_BUF_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, phys))
+		if (dma_mapping_error(priv->dev, phys))
 			goto done;
 		priv->rx_buf[priv->rx_head] = buf;
 		priv->rx_phys[priv->rx_head] = phys;
@@ -557,8 +561,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 	}
 	napi_complete_done(napi, rx);
 done:
-	/* clean up tx descriptors and start a new timer if necessary */
-	tx_remaining = hip04_tx_reclaim(ndev, false);
+	/* start a new timer if necessary */
 	if (rx < budget && tx_remaining)
 		hip04_start_tx_timer(priv);
 
@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
 	for (i = 0; i < RX_DESC_NUM; i++) {
 		dma_addr_t phys;
 
-		phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+		phys = dma_map_single(priv->dev, priv->rx_buf[i],
 				      RX_BUF_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, phys))
+		if (dma_mapping_error(priv->dev, phys))
 			return -EIO;
 
 		priv->rx_phys[i] = phys;
@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
 
 	for (i = 0; i < RX_DESC_NUM; i++) {
 		if (priv->rx_phys[i]) {
-			dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+			dma_unmap_single(priv->dev, priv->rx_phys[i],
 					 RX_BUF_SIZE, DMA_FROM_DEVICE);
 			priv->rx_phys[i] = 0;
 		}
@@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	priv = netdev_priv(ndev);
+	priv->dev = d;
 	priv->ndev = ndev;
 	platform_set_drvdata(pdev, ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6455511..9b608d2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4412,9 +4412,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
 	if (state->pause & MLO_PAUSE_RX)
 		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
 
-	ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
-	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
-		 MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+	ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+		   MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
+	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
 
 	writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
 	writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f5cd953..45d9a5f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1190,7 +1190,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
 	if (err) {
 		en_err(priv, "Failed to allocate RSS indirection QP\n");
-		goto rss_err;
+		goto qp_alloc_err;
 	}
 
 	rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1244,6 +1244,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
 	kfree(rss_map->indir_qp);
 	rss_map->indir_qp = NULL;
 rss_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 45cdde6..a4be04d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
 	return &arfs_t->rules_hash[bucket_idx];
 }
 
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IP)) ?
-		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 					 u8 ip_proto, __be16 etype)
 {
@@ -599,31 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
 	arfs_may_expire_flow(priv);
 }
 
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->dest;
-	return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->source;
-	return ((struct udphdr *)transport_header)->source;
-}
-
 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 					 struct arfs_table *arfs_t,
-					 const struct sk_buff *skb,
+					 const struct flow_keys *fk,
 					 u16 rxq, u32 flow_id)
 {
 	struct arfs_rule *rule;
@@ -638,19 +610,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	INIT_WORK(&rule->arfs_work, arfs_handle_work);
 
 	tuple = &rule->tuple;
-	tuple->etype = skb->protocol;
+	tuple->etype = fk->basic.n_proto;
+	tuple->ip_proto = fk->basic.ip_proto;
 	if (tuple->etype == htons(ETH_P_IP)) {
-		tuple->src_ipv4 = ip_hdr(skb)->saddr;
-		tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+		tuple->src_ipv4 = fk->addrs.v4addrs.src;
+		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
 	} else {
-		memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
 		       sizeof(struct in6_addr));
-		memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
 		       sizeof(struct in6_addr));
 	}
-	tuple->ip_proto = arfs_get_ip_proto(skb);
-	tuple->src_port = arfs_get_src_port(skb);
-	tuple->dst_port = arfs_get_dst_port(skb);
+	tuple->src_port = fk->ports.src;
+	tuple->dst_port = fk->ports.dst;
 
 	rule->flow_id = flow_id;
 	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -661,37 +633,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	return rule;
 }
 
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
-			 const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
 {
-	if (tuple->etype == htons(ETH_P_IP) &&
-	    tuple->src_ipv4 == ip_hdr(skb)->saddr &&
-	    tuple->dst_ipv4 == ip_hdr(skb)->daddr)
-		return true;
-	if (tuple->etype == htons(ETH_P_IPV6) &&
-	    (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-		     sizeof(struct in6_addr))) &&
-	    (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-		     sizeof(struct in6_addr))))
-		return true;
+	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+		return false;
+	if (tuple->etype != fk->basic.n_proto)
+		return false;
+	if (tuple->etype == htons(ETH_P_IP))
+		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+	if (tuple->etype == htons(ETH_P_IPV6))
+		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+			       sizeof(struct in6_addr)) &&
+		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+			       sizeof(struct in6_addr));
 	return false;
 }
 
 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
-					const struct sk_buff *skb)
+					const struct flow_keys *fk)
 {
 	struct arfs_rule *arfs_rule;
 	struct hlist_head *head;
-	__be16 src_port = arfs_get_src_port(skb);
-	__be16 dst_port = arfs_get_dst_port(skb);
 
-	head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
 	hlist_for_each_entry(arfs_rule, head, hlist) {
-		if (arfs_rule->tuple.src_port == src_port &&
-		    arfs_rule->tuple.dst_port == dst_port &&
-		    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+		if (arfs_cmp(&arfs_rule->tuple, fk))
 			return arfs_rule;
-		}
 	}
 
 	return NULL;
@@ -704,20 +672,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
 	struct arfs_table *arfs_t;
 	struct arfs_rule *arfs_rule;
+	struct flow_keys fk;
 
-	if (skb->protocol != htons(ETH_P_IP) &&
-	    skb->protocol != htons(ETH_P_IPV6))
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+		return -EPROTONOSUPPORT;
+
+	if (fk.basic.n_proto != htons(ETH_P_IP) &&
+	    fk.basic.n_proto != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
 	if (skb->encapsulation)
 		return -EPROTONOSUPPORT;
 
-	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
 
 	spin_lock_bh(&arfs->arfs_lock);
-	arfs_rule = arfs_find_rule(arfs_t, skb);
+	arfs_rule = arfs_find_rule(arfs_t, &fk);
 	if (arfs_rule) {
 		if (arfs_rule->rxq == rxq_index) {
 			spin_unlock_bh(&arfs->arfs_lock);
@@ -725,8 +697,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		}
 		arfs_rule->rxq = rxq_index;
 	} else {
-		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
-					    rxq_index, flow_id);
+		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
 		if (!arfs_rule) {
 			spin_unlock_bh(&arfs->arfs_lock);
 			return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 792bb8b..2b9350f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return -EOPNOTSUPP;
+
 	if (pauseparam->autoneg)
 		return -EINVAL;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index b22f464..f9e4750 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
 						snprintf(bit_name, 30,
 							 p_aeu->bit_name, num);
 					else
-						strncpy(bit_name,
+						strlcpy(bit_name,
 							p_aeu->bit_name, 30);
 
 					/* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 13802b8..909422d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
 	/* Vendor specific information */
 	dev->vendor_id = cdev->vendor_id;
 	dev->vendor_part_id = cdev->device_id;
-	dev->hw_ver = 0;
+	dev->hw_ver = cdev->chip_rev;
 	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
 		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index e6ce895..72f3e4b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -699,6 +699,16 @@ int rmnet_get_powersave_notif(void *port)
 	return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
 }
 EXPORT_SYMBOL(rmnet_get_powersave_notif);
+
+struct net_device *rmnet_get_real_dev(void *port)
+{
+	if (port)
+		return ((struct rmnet_port *)port)->dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_real_dev);
+
 #endif
 
 /* Startup/Shutdown */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index d0e6e15..48cf5e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -88,6 +88,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
 	u32 value;
 
 	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+	if (queue >= 4)
+		queue -= 4;
 
 	value = readl(ioaddr + base_register);
 
@@ -105,6 +107,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
 	u32 value;
 
 	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+	if (queue >= 4)
+		queue -= 4;
 
 	value = readl(ioaddr + base_register);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index d182f82..870302a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
 	u32 value, reg;
 
 	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+	if (queue >= 4)
+		queue -= 4;
 
 	value = readl(ioaddr + reg);
 	value &= ~XGMAC_PSRQ(queue);
@@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
 	u32 value, reg;
 
 	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+	if (queue >= 4)
+		queue -= 4;
 
 	value = readl(ioaddr + reg);
 	value &= ~XGMAC_QxMDMACH(queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 58ea18a..37c0bc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
 		entry = &priv->tc_entries[i];
 		if (!entry->in_use && !first && free)
 			first = entry;
-		if (entry->handle == loc && !free)
+		if ((entry->handle == loc) && !free && !entry->is_frag)
 			dup = entry;
 	}
 
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 491efc1..7278eca 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -58,8 +58,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
 		if (!phy->last_triggered)
 			led_trigger_event(&phy->led_link_trigger->trigger,
 					  LED_FULL);
+		else
+			led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
 
-		led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
 		led_trigger_event(&plt->trigger, LED_FULL);
 		phy->last_triggered = plt;
 	}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index dc30f11..3feb49b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1011,6 +1011,8 @@ static void __team_compute_features(struct team *team)
 
 	team->dev->vlan_features = vlan_features;
 	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+				     NETIF_F_HW_VLAN_CTAG_TX |
+				     NETIF_F_HW_VLAN_STAG_TX |
 				     NETIF_F_GSO_UDP_L4;
 	team->dev->hard_header_len = max_hard_header_len;
 
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f4247b2..b7a0df9 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
 	int i;
-	__u8 tmp;
+	__u8 tmp = 0;
 	__le16 retdatai;
 	int ret;
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 128c8a3..51017c6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1231,6 +1231,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
 	{QMI_FIXED_INTF(0x2020, 0x2031, 4)},	/* Olicard 600 */
 	{QMI_FIXED_INTF(0x2020, 0x2033, 4)},	/* BroadMobi BM806U */
+	{QMI_FIXED_INTF(0x2020, 0x2060, 4)},	/* BroadMobi BM818 */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index ba15afd..05d8c8e 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -18,6 +18,22 @@
 #define WIL_BRD_SUFFIX_CN "CN"
 #define WIL_BRD_SUFFIX_FCC "FCC"
 
+#define WIL_EDMG_CHANNEL_9_SUBCHANNELS	(BIT(0) | BIT(1))
+#define WIL_EDMG_CHANNEL_10_SUBCHANNELS	(BIT(1) | BIT(2))
+#define WIL_EDMG_CHANNEL_11_SUBCHANNELS	(BIT(2) | BIT(3))
+
+/* WIL_EDMG_BW_CONFIGURATION defines the allowed channel bandwidth
+ * configurations as defined by IEEE 802.11 section 9.4.2.251, Table 13.
+ * The value 5 allows CB1 and CB2 of adjacent channels.
+ */
+#define WIL_EDMG_BW_CONFIGURATION 5
+
+/* WIL_EDMG_CHANNELS is a bitmap that indicates the 2.16 GHz channel(s) that
+ * are allowed to be used for EDMG transmissions in the BSS as defined by
+ * IEEE 802.11 section 9.4.2.251.
+ */
+#define WIL_EDMG_CHANNELS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
 bool disable_ap_sme;
 module_param(disable_ap_sme, bool, 0444);
 MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
@@ -56,6 +72,39 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
 	CHAN60G(4, 0),
 };
 
+/* Rx channel bonding mode */
+enum wil_rx_cb_mode {
+	WIL_RX_CB_MODE_DMG,
+	WIL_RX_CB_MODE_EDMG,
+	WIL_RX_CB_MODE_WIDE,
+};
+
+static int wil_rx_cb_mode_to_n_bonded(u8 cb_mode)
+{
+	switch (cb_mode) {
+	case WIL_RX_CB_MODE_DMG:
+	case WIL_RX_CB_MODE_EDMG:
+		return 1;
+	case WIL_RX_CB_MODE_WIDE:
+		return 2;
+	default:
+		return 1;
+	}
+}
+
+static int wil_tx_cb_mode_to_n_bonded(u8 cb_mode)
+{
+	switch (cb_mode) {
+	case WMI_TX_MODE_DMG:
+	case WMI_TX_MODE_EDMG_CB1:
+		return 1;
+	case WMI_TX_MODE_EDMG_CB2:
+		return 2;
+	default:
+		return 1;
+	}
+}
+
 static void
 wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len)
 {
@@ -167,6 +216,13 @@ void update_supported_bands(struct wil6210_priv *wil)
 
 	wiphy->bands[NL80211_BAND_60GHZ]->n_channels =
 						wil_num_supported_channels(wil);
+
+	if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) {
+		wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.channels =
+							WIL_EDMG_CHANNELS;
+		wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.bw_config =
+						      WIL_EDMG_BW_CONFIGURATION;
+	}
 }
 
 /* Vendor id to be used in vendor specific command and events
@@ -593,6 +649,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 	} __packed reply;
 	struct wil_net_stats *stats = &wil->sta[cid].stats;
 	int rc;
+	u8 txflag = RATE_INFO_FLAGS_DMG;
 
 	memset(&reply, 0, sizeof(reply));
 
@@ -606,7 +663,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 		    "  MCS %d TSF 0x%016llx\n"
 		    "  BF status 0x%08x RSSI %d SQI %d%%\n"
 		    "  Tx Tpt %d goodput %d Rx goodput %d\n"
-		    "  Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
+		    "  Sectors(rx:tx) my %d:%d peer %d:%d\n"
+		    "  Tx mode %d}\n",
 		    cid, vif->mid, le16_to_cpu(reply.evt.bf_mcs),
 		    le64_to_cpu(reply.evt.tsf), reply.evt.status,
 		    reply.evt.rssi,
@@ -617,7 +675,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 		    le16_to_cpu(reply.evt.my_rx_sector),
 		    le16_to_cpu(reply.evt.my_tx_sector),
 		    le16_to_cpu(reply.evt.other_rx_sector),
-		    le16_to_cpu(reply.evt.other_tx_sector));
+		    le16_to_cpu(reply.evt.other_tx_sector),
+		    reply.evt.tx_mode);
 
 	sinfo->generation = wil->sinfo_gen;
 
@@ -630,9 +689,16 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 			BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
 			BIT_ULL(NL80211_STA_INFO_TX_FAILED);
 
-	sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
+	if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG)
+		txflag = RATE_INFO_FLAGS_EDMG;
+
+	sinfo->txrate.flags = txflag;
 	sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
 	sinfo->rxrate.mcs = stats->last_mcs_rx;
+	sinfo->txrate.n_bonded_ch =
+				  wil_tx_cb_mode_to_n_bonded(reply.evt.tx_mode);
+	sinfo->rxrate.n_bonded_ch =
+			     wil_rx_cb_mode_to_n_bonded(stats->last_cb_mode_rx);
 	sinfo->rx_bytes = stats->rx_bytes;
 	sinfo->rx_packets = stats->rx_packets;
 	sinfo->rx_dropped_misc = stats->rx_dropped;
@@ -1310,6 +1376,33 @@ static int wil_ft_connect(struct wiphy *wiphy,
 	return rc;
 }
 
+static int wil_get_wmi_edmg_channel(struct wil6210_priv *wil, u8 edmg_bw_config,
+				    u8 edmg_channels, u8 *wmi_ch)
+{
+	if (!edmg_bw_config) {
+		*wmi_ch = 0;
+		return 0;
+	} else if (edmg_bw_config == WIL_EDMG_BW_CONFIGURATION) {
+		/* convert from edmg channel bitmap into edmg channel number */
+		switch (edmg_channels) {
+		case WIL_EDMG_CHANNEL_9_SUBCHANNELS:
+			return wil_spec2wmi_ch(9, wmi_ch);
+		case WIL_EDMG_CHANNEL_10_SUBCHANNELS:
+			return wil_spec2wmi_ch(10, wmi_ch);
+		case WIL_EDMG_CHANNEL_11_SUBCHANNELS:
+			return wil_spec2wmi_ch(11, wmi_ch);
+		default:
+			wil_err(wil, "Unsupported EDMG channel bitmap 0x%x\n",
+				edmg_channels);
+			return -EINVAL;
+		}
+	} else {
+		wil_err(wil, "Unsupported EDMG BW configuration %d\n",
+			edmg_bw_config);
+		return -EINVAL;
+	}
+}
+
 static int wil_cfg80211_connect(struct wiphy *wiphy,
 				struct net_device *ndev,
 				struct cfg80211_connect_params *sme)
@@ -1455,7 +1548,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
 	memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
 	conn.channel = ch - 1;
 
-	if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities))
+	if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) {
 		if (wil->force_edmg_channel) {
 			rc = wil_spec2wmi_ch(wil->force_edmg_channel,
 					     &conn.edmg_channel);
@@ -1463,7 +1556,15 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
 				wil_err(wil,
 					"wmi channel for channel %d not found",
 					wil->force_edmg_channel);
+		} else {
+			rc = wil_get_wmi_edmg_channel(wil,
+						      sme->edmg.bw_config,
+						      sme->edmg.channels,
+						      &conn.edmg_channel);
+			if (rc < 0)
+				return rc;
 		}
+	}
 
 	ether_addr_copy(conn.bssid, bss->bssid);
 	ether_addr_copy(conn.dst_mac, bss->bssid);
@@ -2032,7 +2133,7 @@ static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
 static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 				  struct net_device *ndev,
 				  const u8 *ssid, size_t ssid_len, u32 privacy,
-				  int bi, u8 chan,
+				  int bi, u8 chan, u8 wmi_edmg_channel,
 				  struct cfg80211_beacon_data *bcon,
 				  u8 hidden_ssid, u32 pbss)
 {
@@ -2049,7 +2150,8 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 	if (pbss)
 		wmi_nettype = WMI_NETTYPE_P2P;
 
-	wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d\n", vif->mid, is_go);
+	wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d, ap_ps=%d\n", vif->mid,
+		     is_go, wil->ap_ps);
 	if (is_go && !pbss) {
 		wil_err(wil, "P2P GO must be in PBSS\n");
 		return -ENOTSUPP;
@@ -2110,6 +2212,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 
 	vif->privacy = privacy;
 	vif->channel = chan;
+	vif->wmi_edmg_channel = wmi_edmg_channel;
 	vif->hidden_ssid = hidden_ssid;
 	vif->pbss = pbss;
 	vif->bi = bi;
@@ -2131,7 +2234,8 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 	}
 
 
-	rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, hidden_ssid, is_go);
+	rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, wmi_edmg_channel,
+			   hidden_ssid, is_go);
 	if (rc)
 		goto err_pcp_start;
 
@@ -2139,6 +2243,14 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 	if (rc)
 		goto err_bcast;
 
+	if (test_bit(WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT,
+		     wil->fw_capabilities)) {
+		enum wmi_ps_profile_type ps_profile = wil->ap_ps ?
+			wil->ps_profile : WMI_PS_PROFILE_TYPE_PS_DISABLED;
+
+		wil_ps_update(wil, ps_profile);
+	}
+
 	goto out; /* success */
 
 err_bcast:
@@ -2188,7 +2300,8 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
 		rc = _wil_cfg80211_start_ap(wiphy, ndev,
 					    vif->ssid, vif->ssid_len,
 					    vif->privacy, vif->bi,
-					    vif->channel, &bcon,
+					    vif->channel,
+					    vif->wmi_edmg_channel, &bcon,
 					    vif->hidden_ssid, vif->pbss);
 		if (rc) {
 			wil_err(wil, "vif %d recovery failed (%d)\n", i, rc);
@@ -2238,7 +2351,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
 		rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
 					    vif->ssid_len, privacy,
 					    wdev->beacon_interval,
-					    vif->channel, bcon,
+					    vif->channel,
+					    vif->wmi_edmg_channel, bcon,
 					    vif->hidden_ssid,
 					    vif->pbss);
 	} else {
@@ -2257,10 +2371,17 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
 	struct ieee80211_channel *channel = info->chandef.chan;
 	struct cfg80211_beacon_data *bcon = &info->beacon;
 	struct cfg80211_crypto_settings *crypto = &info->crypto;
+	u8 wmi_edmg_channel;
 	u8 hidden_ssid;
 
 	wil_dbg_misc(wil, "start_ap\n");
 
+	rc = wil_get_wmi_edmg_channel(wil, info->chandef.edmg.bw_config,
+				      info->chandef.edmg.channels,
+				      &wmi_edmg_channel);
+	if (rc < 0)
+		return rc;
+
 	if (!channel) {
 		wil_err(wil, "AP: No channel???\n");
 		return -EINVAL;
@@ -2300,7 +2421,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
 	rc = _wil_cfg80211_start_ap(wiphy, ndev,
 				    info->ssid, info->ssid_len, info->privacy,
 				    info->beacon_interval, channel->hw_value,
-				    bcon, hidden_ssid, info->pbss);
+				    wmi_edmg_channel, bcon, hidden_ssid,
+				    info->pbss);
 
 	return rc;
 }
@@ -3663,9 +3785,7 @@ static int wil_nl_60g_handle_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
 				     "NL_60G_GEN_FW_RESET, resetting...\n");
 
 			mutex_lock(&wil->mutex);
-			down_write(&wil->mem_lock);
 			rc = wil_reset(wil, true);
-			up_write(&wil->mem_lock);
 			mutex_unlock(&wil->mutex);
 
 			break;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 7a33dab..36ca937 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -2639,6 +2639,7 @@ static const struct dbg_off dbg_wil_off[] = {
 	WIL_FIELD(rx_buff_id_count, 0644,	doff_u32),
 	WIL_FIELD(amsdu_en, 0644,	doff_u8),
 	WIL_FIELD(force_edmg_channel, 0644,	doff_u8),
+	WIL_FIELD(ap_ps, 0644, doff_u8),
 	{},
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 1c2227c..e9b7b2d 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1307,6 +1307,8 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
 	}
 
 	update_supported_bands(wil);
+
+	wil->ap_ps = test_bit(WIL_PLATFORM_CAPA_AP_PS, wil->platform_capa);
 }
 
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -1745,6 +1747,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	/* Disable device led before reset*/
 	wmi_led_cfg(wil, false);
 
+	down_write(&wil->mem_lock);
+
 	/* prevent NAPI from being scheduled and prevent wmi commands */
 	mutex_lock(&wil->wmi_mutex);
 	if (test_bit(wil_status_suspending, wil->status))
@@ -1800,6 +1804,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
 		if  (wil->secured_boot) {
 			wil_err(wil, "secured boot is not supported\n");
+			up_write(&wil->mem_lock);
 			return -ENOTSUPP;
 		}
 
@@ -1830,6 +1835,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
 	clear_bit(wil_status_resetting, wil->status);
 
+	up_write(&wil->mem_lock);
+
 	if (load_fw) {
 		wil_unmask_irq(wil);
 
@@ -1897,6 +1904,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	return rc;
 
 out:
+	up_write(&wil->mem_lock);
 	clear_bit(wil_status_resetting, wil->status);
 	return rc;
 }
@@ -1922,9 +1930,7 @@ int __wil_up(struct wil6210_priv *wil)
 
 	WARN_ON(!mutex_is_locked(&wil->mutex));
 
-	down_write(&wil->mem_lock);
 	rc = wil_reset(wil, true);
-	up_write(&wil->mem_lock);
 	if (rc)
 		return rc;
 
@@ -2017,9 +2023,7 @@ int __wil_down(struct wil6210_priv *wil)
 	wil_abort_scan_all_vifs(wil, false);
 	mutex_unlock(&wil->vif_mutex);
 
-	down_write(&wil->mem_lock);
 	rc = wil_reset(wil, false);
-	up_write(&wil->mem_lock);
 
 	return rc;
 }
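The wil6210 hunks above move mem_lock acquisition from every caller of wil_reset() into the reset path itself, so no caller can forget it and each early exit releases it. A minimal sketch of the pattern, under illustrative names:

#include <linux/rwsem.h>

struct example_dev {
	struct rw_semaphore mem_lock;
};

static int example_do_hw_reset(struct example_dev *d)
{
	return 0;	/* stand-in for the real reset work */
}

/* The lock is owned by the reset path rather than by its callers */
static int example_reset(struct example_dev *d)
{
	int rc;

	down_write(&d->mem_lock);
	rc = example_do_hw_reset(d);
	up_write(&d->mem_lock);	/* released on every return path */

	return rc;
}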
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 273e57f..a71c0dc 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1080,6 +1080,8 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
 		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
 		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
 			stats->rx_per_mcs[stats->last_mcs_rx]++;
+
+		stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
 	}
 
 	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index e9ab926..af6de29 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -354,6 +354,12 @@ static inline u8 wil_rx_status_get_mcs(void *msg)
 			    16, 21);
 }
 
+static inline u8 wil_rx_status_get_cb_mode(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+			    22, 23);
+}
+
 static inline u16 wil_rx_status_get_flow_id(void *msg)
 {
 	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 5120b46..8ebc76a 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -604,6 +604,7 @@ struct wil_net_stats {
 	unsigned long	rx_amsdu_error; /* eDMA specific */
 	unsigned long	rx_csum_err;
 	u16 last_mcs_rx;
+	u8 last_cb_mode_rx;
 	u64 rx_per_mcs[WIL_MCS_MAX + 1];
 	u32 ft_roams; /* relevant in STA mode */
 };
@@ -866,6 +867,7 @@ struct wil6210_vif {
 	DECLARE_BITMAP(status, wil_vif_status_last);
 	u32 privacy; /* secure connection? */
 	u16 channel; /* relevant in AP mode */
+	u8 wmi_edmg_channel; /* relevant in AP mode */
 	u8 hidden_ssid; /* relevant in AP mode */
 	u32 ap_isolate; /* no intra-BSS communication */
 	bool pbss;
@@ -1041,6 +1043,7 @@ struct wil6210_priv {
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
 	bool keep_radio_on_during_sleep;
+	u8 ap_ps; /* AP mode power save enabled */
 
 	struct pmc_ctx pmc;
 
@@ -1396,7 +1399,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil);
 
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
 int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, u8 chan,
-		  u8 hidden_ssid, u8 is_go);
+		  u8 edmg_chan, u8 hidden_ssid, u8 is_go);
 int wmi_pcp_stop(struct wil6210_vif *vif);
 int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
 int wmi_abort_scan(struct wil6210_vif *vif);
@@ -1506,6 +1509,7 @@ int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold);
 
 int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch);
 int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch);
+void wil_update_supported_bands(struct wil6210_priv *wil);
 
 int reverse_memcmp(const void *cs, const void *ct, size_t count);
 
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1332eb8..89c12cb 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -46,7 +46,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
 
 int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 {
-	int i, rc;
+	int i;
 	const struct fw_map *map;
 	void *data;
 	u32 host_min, dump_size, offset, len;
@@ -62,9 +62,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 		return -EINVAL;
 	}
 
-	rc = wil_mem_access_lock(wil);
-	if (rc)
-		return rc;
+	down_write(&wil->mem_lock);
+
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status)) {
+		wil_err(wil,
+			"suspend/resume in progress. cannot copy crash dump\n");
+		up_write(&wil->mem_lock);
+		return -EBUSY;
+	}
 
 	/* copy to crash dump area */
 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -84,7 +90,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 		wil_memcpy_fromio_32((void * __force)(dest + offset),
 				     (const void __iomem * __force)data, len);
 	}
-	wil_mem_access_unlock(wil);
+
+	up_write(&wil->mem_lock);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index d381649..e6730af 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -32,6 +32,7 @@ enum wil_platform_capa {
 	WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
 	WIL_PLATFORM_CAPA_EXT_CLK = 2,
 	WIL_PLATFORM_CAPA_SMMU = 3,
+	WIL_PLATFORM_CAPA_AP_PS = 4,
 	WIL_PLATFORM_CAPA_MAX,
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 3a10bd8..ec7444b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1447,6 +1447,10 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
 	__le16 fc;
 	u32 d_len;
 	struct cfg80211_bss *bss;
+	struct cfg80211_inform_bss bss_data = {
+		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
+		.boottime_ns = ktime_to_ns(ktime_get_boottime()),
+	};
 
 	if (flen < 0) {
 		wil_err(wil, "sched scan result event too short, len %d\n",
@@ -1489,8 +1493,10 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
 		return;
 	}
 
-	bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
-					d_len, signal, GFP_KERNEL);
+	bss_data.signal = signal;
+	bss_data.chan = channel;
+	bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data, rx_mgmt_frame,
+					     d_len, GFP_KERNEL);
 	if (bss) {
 		wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
 		cfg80211_put_bss(wiphy, bss);
@@ -2253,8 +2259,8 @@ int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold)
 	return rc;
 }
 
-int wmi_pcp_start(struct wil6210_vif *vif,
-		  int bi, u8 wmi_nettype, u8 chan, u8 hidden_ssid, u8 is_go)
+int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype,
+		  u8 chan, u8 wmi_edmg_chan, u8 hidden_ssid, u8 is_go)
 {
 	struct wil6210_priv *wil = vif_to_wil(vif);
 	int rc;
@@ -2264,6 +2270,7 @@ int wmi_pcp_start(struct wil6210_vif *vif,
 		.network_type = wmi_nettype,
 		.disable_sec_offload = 1,
 		.channel = chan - 1,
+		.edmg_channel = wmi_edmg_chan,
 		.pcp_max_assoc_sta = max_assoc_sta,
 		.hidden_ssid = hidden_ssid,
 		.is_go = is_go,
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index ceb0c5b..fc28f4b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -99,6 +99,7 @@ enum wmi_fw_capability {
 	WMI_FW_CAPABILITY_CHANNEL_4			= 26,
 	WMI_FW_CAPABILITY_IPA				= 27,
 	WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF		= 30,
+	WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT		= 32,
 	WMI_FW_CAPABILITY_MAX,
 };
 
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 6f0efc7..ec75ff6 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -615,6 +615,9 @@ static int cnss_show_quirks_state(struct seq_file *s,
 		case DISABLE_DRV:
 			seq_puts(s, "DISABLE_DRV");
 			continue;
+		case DISABLE_IO_COHERENCY:
+			seq_puts(s, "DISABLE_IO_COHERENCY");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 0215f59..8e76123 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -31,7 +31,7 @@
 #define CNSS_EVENT_PENDING		2989
 #define COLD_BOOT_CAL_SHUTDOWN_DELAY_MS	50
 
-#define CNSS_QUIRKS_DEFAULT		0
+#define CNSS_QUIRKS_DEFAULT		BIT(DISABLE_IO_COHERENCY)
 #ifdef CONFIG_CNSS_EMULATION
 #define CNSS_MHI_TIMEOUT_DEFAULT	90000
 #else
@@ -1207,6 +1207,26 @@ int cnss_force_collect_rddm(struct device *dev)
 }
 EXPORT_SYMBOL(cnss_force_collect_rddm);
 
+int cnss_qmi_send_get(struct device *dev)
+{
+	return 0;
+}
+EXPORT_SYMBOL(cnss_qmi_send_get);
+
+int cnss_qmi_send_put(struct device *dev)
+{
+	return 0;
+}
+EXPORT_SYMBOL(cnss_qmi_send_put);
+
+int cnss_qmi_send(struct device *dev, int type, void *cmd,
+		  int cmd_len, void *cb_ctx,
+		  int (*cb)(void *ctx, void *event, int event_len))
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(cnss_qmi_send);
+
 static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index d11f099..65d7330 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -239,6 +239,7 @@ enum cnss_debug_quirks {
 	FBC_BYPASS,
 	ENABLE_DAEMON_SUPPORT,
 	DISABLE_DRV,
+	DISABLE_IO_COHERENCY,
 };
 
 enum cnss_bdf_type {
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index d661e5e..245446f 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1860,9 +1860,16 @@ int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
 static bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
 {
 	struct pci_dev *root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
-	struct device_node *root_of_node = root_port->dev.of_node;
+	struct device_node *root_of_node;
 	bool drv_supported = false;
 
+	if (!root_port) {
+		cnss_pr_err("PCIe DRV is not supported as root port is null\n");
+		return drv_supported;
+	}
+
+	root_of_node = root_port->dev.of_node;
+
 	if (root_of_node->parent)
 		drv_supported = of_property_read_bool(root_of_node->parent,
 						      "qcom,drv-supported");
@@ -2895,9 +2902,14 @@ int cnss_smmu_map(struct device *dev,
 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
 {
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+	struct cnss_plat_data *plat_priv;
 	unsigned long iova;
 	size_t len;
 	int ret = 0;
+	int flag = IOMMU_READ | IOMMU_WRITE;
+	struct pci_dev *root_port;
+	struct device_node *root_of_node;
+	bool dma_coherent = false;
 
 	if (!pci_priv)
 		return -ENODEV;
@@ -2908,6 +2920,8 @@ int cnss_smmu_map(struct device *dev,
 		return -EINVAL;
 	}
 
+	plat_priv = pci_priv->plat_priv;
+
 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
 	iova = roundup(pci_priv->smmu_iova_ipa_start, PAGE_SIZE);
 
@@ -2920,9 +2934,23 @@ int cnss_smmu_map(struct device *dev,
 		return -ENOMEM;
 	}
 
+	if (!test_bit(DISABLE_IO_COHERENCY,
+		      &plat_priv->ctrl_params.quirks)) {
+		root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
+		root_of_node = root_port->dev.of_node;
+		if (root_of_node->parent) {
+			dma_coherent =
+				of_property_read_bool(root_of_node->parent,
+						      "dma-coherent");
+			cnss_pr_dbg("dma-coherent is %s\n",
+				    dma_coherent ? "enabled" : "disabled");
+			if (dma_coherent)
+				flag |= IOMMU_CACHE;
+		}
+	}
+
 	ret = iommu_map(pci_priv->iommu_domain, iova,
-			rounddown(paddr, PAGE_SIZE), len,
-			IOMMU_READ | IOMMU_WRITE);
+			rounddown(paddr, PAGE_SIZE), len, flag);
 	if (ret) {
 		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
 		return ret;
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 3513d15..02178b3 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -489,7 +489,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
 	struct wlfw_bdf_download_resp_msg_v01 *resp;
 	struct qmi_txn txn;
 	char filename[MAX_BDF_FILE_NAME];
-	const struct firmware *fw_entry;
+	const struct firmware *fw_entry = NULL;
 	const u8 *temp;
 	unsigned int remaining;
 	int ret = 0;
@@ -539,7 +539,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
 		req->data_valid = 1;
 		req->end_valid = 1;
 		req->bdf_type_valid = 1;
-		req->bdf_type = plat_priv->ctrl_params.bdf_type;
+		req->bdf_type = bdf_type;
 
 		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
 			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
@@ -594,7 +594,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
 	return 0;
 
 err_send:
-	if (plat_priv->ctrl_params.bdf_type != CNSS_BDF_DUMMY)
+	if (bdf_type != CNSS_BDF_DUMMY)
 		release_firmware(fw_entry);
 err_req_fw:
 	if (bdf_type != CNSS_BDF_REGDB)
@@ -775,12 +775,12 @@ int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
 	u32 i;
 	int ret = 0;
 
-	cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
-		    plat_priv->driver_state);
-
 	if (!plat_priv)
 		return -ENODEV;
 
+	cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 8b7d70e..3fe7605 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -724,7 +724,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 
 	for (i = 0; i < n_profiles; i++) {
 		/* the tables start at element 3 */
-		static int pos = 3;
+		int pos = 3;
 
 		/* The EWRD profiles officially go from 2 to 4, but we
 		 * save them in sar_profiles[1-3] (because we don't
@@ -836,6 +836,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 }
 
+static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+{
+	/*
+	 * The GEO_TX_POWER_LIMIT command is not supported on earlier
+	 * firmware versions.  Unfortunately, we don't have a TLV API
+	 * flag to rely on, so rely on the major version which is in
+	 * the first byte of ucode_ver.  This was implemented
+	 * initially on version 38 and then backported to 36, 29 and
+	 * 17.
+	 */
+	return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+}
+
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 {
 	struct iwl_geo_tx_power_profiles_resp *resp;
@@ -851,6 +867,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 		.data = { &geo_cmd },
 	};
 
+	if (!iwl_mvm_sar_geo_support(mvm))
+		return -EOPNOTSUPP;
+
 	ret = iwl_mvm_send_cmd(mvm, &cmd);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
@@ -876,13 +895,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 	int ret, i, j;
 	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
-	/*
-	 * This command is not supported on earlier firmware versions.
-	 * Unfortunately, we don't have a TLV API flag to rely on, so
-	 * rely on the major version which is in the first byte of
-	 * ucode_ver.
-	 */
-	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+	if (!iwl_mvm_sar_geo_support(mvm))
 		return 0;
 
 	ret = iwl_mvm_sar_get_wgds_table(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 93f0d38..42fdb79 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -403,6 +403,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 					 DMA_TO_DEVICE);
 	}
 
+	meta->tbs = 0;
+
 	if (trans->cfg->use_tfh) {
 		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7cd428c..ce2dd06 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3502,10 +3502,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
 		hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 				  cb->nlh->nlmsg_seq, &hwsim_genl_family,
 				  NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
-		if (!hdr)
+		if (hdr) {
+			genl_dump_check_consistent(cb, hdr);
+			genlmsg_end(skb, hdr);
+		} else {
 			res = -EMSGSIZE;
-		genl_dump_check_consistent(cb, hdr);
-		genlmsg_end(skb, hdr);
+		}
 	}
 
 done:
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index b025ba1..e39bb5c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -124,6 +124,7 @@ enum {
 
 #define MWIFIEX_MAX_TOTAL_SCAN_TIME	(MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
 
+#define WPA_GTK_OUI_OFFSET				2
 #define RSN_GTK_OUI_OFFSET				2
 
 #define MWIFIEX_OUI_NOT_PRESENT			0
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 6dd771c..ed27147 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
 	u8 ret = MWIFIEX_OUI_NOT_PRESENT;
 
 	if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
-		iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
+		iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
+					    WPA_GTK_OUI_OFFSET);
 		oui = &mwifiex_wpa_oui[cipher][0];
 		ret = mwifiex_search_oui_in_ie(iebody, oui);
 		if (ret)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d5081ff..1c84910 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
 			nskb = xenvif_alloc_skb(0);
 			if (unlikely(nskb == NULL)) {
+				skb_shinfo(skb)->nr_frags = 0;
 				kfree_skb(skb);
 				xenvif_tx_err(queue, &txreq, extra_count, idx);
 				if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 				/* Failure in xenvif_set_skb_gso is fatal. */
+				skb_shinfo(skb)->nr_frags = 0;
 				kfree_skb(skb);
 				kfree_skb(nskb);
 				break;
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 2c8f425..1d49640 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -41,21 +41,9 @@ static const struct of_device_id msm_match_table[] = {
 
 MODULE_DEVICE_TABLE(of, msm_match_table);
 
-#define DEV_COUNT	1
-#define DEVICE_NAME	"nq-nci"
-#define CLASS_NAME	"nqx"
-#define MAX_BUFFER_SIZE			(320)
-#define WAKEUP_SRC_TIMEOUT		(2000)
-#define MAX_RETRY_COUNT			3
-#define NCI_RESET_CMD_LEN		4
-#define NCI_RESET_RSP_LEN		4
-#define NCI_RESET_NTF_LEN		13
-#define NCI_GET_VERSION_CMD_LEN		8
-#define NCI_GET_VERSION_RSP_LEN		12
-#define MAX_IRQ_WAIT_TIME		(90)	//in ms
-
 struct nqx_dev {
 	wait_queue_head_t	read_wq;
+	wait_queue_head_t	cold_reset_read_wq;
 	struct	mutex		read_mutex;
 	struct	mutex		dev_ref_mutex;
 	struct	i2c_client	*client;
@@ -72,10 +60,14 @@ struct nqx_dev {
 	unsigned int		ese_gpio;
 	/* NFC VEN pin state powered by Nfc */
 	bool			nfc_ven_enabled;
+	/* NFC state reflected from MW */
+	bool			nfc_enabled;
 	/* NFC_IRQ state */
 	bool			irq_enabled;
 	/* NFC_IRQ wake-up state */
 	bool			irq_wake_up;
+	bool			cold_reset_rsp_pending;
+	uint8_t			cold_reset_status;
 	spinlock_t		irq_enabled_lock;
 	unsigned int		count_irq;
 	/* NFC_IRQ Count */
@@ -97,6 +89,9 @@ static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
 static int nqx_clock_select(struct nqx_dev *nqx_dev);
 /*clock disable function*/
 static int nqx_clock_deselect(struct nqx_dev *nqx_dev);
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+				const unsigned char *buf, size_t len);
+
 static struct notifier_block nfcc_notifier = {
 	.notifier_call	= nfcc_reboot,
 	.next			= NULL,
@@ -159,6 +154,92 @@ static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int is_data_available_for_read(struct nqx_dev *nqx_dev)
+{
+	int ret;
+
+	nqx_enable_irq(nqx_dev);
+	ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
+		!nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+	return ret;
+}
+
+static int send_cold_reset_cmd(struct nqx_dev *nqx_dev)
+{
+	int ret;
+	char *cold_reset_cmd = NULL;
+
+	if (gpio_get_value(nqx_dev->firm_gpio)) {
+		dev_err(&nqx_dev->client->dev, "FW download in-progress\n");
+		return -EBUSY;
+	}
+	if (!gpio_get_value(nqx_dev->en_gpio)) {
+		dev_err(&nqx_dev->client->dev, "VEN LOW - NFCC powered off\n");
+		return -ENODEV;
+	}
+	cold_reset_cmd = kzalloc(COLD_RESET_CMD_LEN, GFP_DMA | GFP_KERNEL);
+	if (!cold_reset_cmd)
+		return -ENOMEM;
+
+	cold_reset_cmd[0] = COLD_RESET_CMD_GID;
+	cold_reset_cmd[1] = COLD_RESET_OID;
+	cold_reset_cmd[2] = COLD_RESET_CMD_PAYLOAD_LEN;
+
+	ret = nqx_standby_write(nqx_dev, cold_reset_cmd, COLD_RESET_CMD_LEN);
+	if (ret < 0) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: write failed after max retry\n", __func__);
+	}
+	kfree(cold_reset_cmd);
+	return ret;
+}
+
+static void read_cold_reset_rsp(struct nqx_dev *nqx_dev, bool is_nfc_enabled,
+				char *header)
+{
+	int ret = -1;
+	char *cold_reset_rsp = NULL;
+
+	cold_reset_rsp = kzalloc(COLD_RESET_RSP_LEN, GFP_DMA | GFP_KERNEL);
+	if (!cold_reset_rsp)
+		return;
+
+	/*
+	 * Read the header here as well when NFC is disabled; in the
+	 * enabled case this is taken care of by the nfc_read thread.
+	 */
+	if (!is_nfc_enabled) {
+		ret = i2c_master_recv(nqx_dev->client, cold_reset_rsp,
+					NCI_HEADER_LEN);
+		if (ret != NCI_HEADER_LEN) {
+			dev_err(&nqx_dev->client->dev,
+				"%s: failure to read cold reset rsp header\n",
+				__func__);
+			goto error;
+		}
+	} else {
+		memcpy(cold_reset_rsp, header, NCI_HEADER_LEN);
+	}
+
+	if ((NCI_HEADER_LEN + cold_reset_rsp[2]) > COLD_RESET_RSP_LEN) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: invalid response for cold_reset\n", __func__);
+		ret = -EINVAL;
+		goto error;
+	}
+	ret = i2c_master_recv(nqx_dev->client, &cold_reset_rsp[NCI_PAYLOAD_IDX],
+				cold_reset_rsp[2]);
+	if (ret != cold_reset_rsp[2]) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: failure to read cold reset rsp status\n",
+			__func__);
+		goto error;
+	}
+	nqx_dev->cold_reset_status = cold_reset_rsp[NCI_PAYLOAD_IDX];
+error:
+	kfree(cold_reset_rsp);
+}
+
 static ssize_t nfc_read(struct file *filp, char __user *buf,
 					size_t count, loff_t *offset)
 {
@@ -232,6 +313,24 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
 		ret = -EIO;
 		goto err;
 	}
+	/* Check whether this is the response to the cold reset command.
+	 * The NFC HAL process shouldn't receive this data, as the
+	 * command was sent by the eSE HAL.
+	 */
+	if (nqx_dev->cold_reset_rsp_pending
+		&& (tmp[0] == COLD_RESET_RSP_GID)
+		&& (tmp[1] == COLD_RESET_OID)) {
+		read_cold_reset_rsp(nqx_dev, true, tmp);
+		nqx_dev->cold_reset_rsp_pending = false;
+		wake_up_interruptible(&nqx_dev->cold_reset_read_wq);
+		mutex_unlock(&nqx_dev->read_mutex);
+		/*
+		 * The NFC process doesn't know about the cold reset
+		 * command being sent, as it was initiated by the eSE
+		 * process, so we shouldn't return any data to it.
+		 */
+		return 0;
+	}
 #ifdef NFC_KERNEL_BU
 		dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
 			__func__, tmp[0], tmp[1], tmp[2]);
@@ -326,17 +425,19 @@ static int nqx_standby_write(struct nqx_dev *nqx_dev,
 	return ret;
 }
 
+
 /*
  * Power management of the SN100 eSE
  * eSE and NFCC both are powered using VEN gpio in SN100,
  * VEN HIGH - eSE and NFCC both are powered on
  * VEN LOW - eSE and NFCC both are power down
  */
+
 static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
 {
 	int r = -1;
 
-	if (arg == 0) {
+	if (arg == ESE_POWER_ON) {
 		/**
 		 * Let's store the NFC VEN pin state
 		 * will check stored value in case of eSE power off request,
@@ -355,7 +456,7 @@ static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
 			dev_dbg(&nqx_dev->client->dev, "en_gpio already HIGH\n");
 		}
 		r = 0;
-	} else if (arg == 1) {
+	} else if (arg == ESE_POWER_OFF) {
 		if (!nqx_dev->nfc_ven_enabled) {
 			dev_dbg(&nqx_dev->client->dev, "NFC not enabled, disabling en_gpio\n");
 			gpio_set_value(nqx_dev->en_gpio, 0);
@@ -365,7 +466,40 @@ static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
 			dev_dbg(&nqx_dev->client->dev, "keep en_gpio high as NFC is enabled\n");
 		}
 		r = 0;
-	} else if (arg == 3) {
+	} else if (arg == ESE_COLD_RESET) {
+		/* default the status to failure */
+		nqx_dev->cold_reset_status = EIO;
+
+		r = send_cold_reset_cmd(nqx_dev);
+		if (r <= 0) {
+			dev_err(&nqx_dev->client->dev,
+				"failed to send cold reset command\n");
+			return nqx_dev->cold_reset_status;
+		}
+		nqx_dev->cold_reset_rsp_pending = true;
+		/* check if NFC is enabled */
+		if (nqx_dev->nfc_enabled) {
+			/*
+			 * nfc_read thread will initiate cold reset response
+			 * and it will signal for data available
+			 */
+			wait_event_interruptible(nqx_dev->cold_reset_read_wq,
+				!nqx_dev->cold_reset_rsp_pending);
+		} else {
+			/*
+			 * Read data as NFC thread is not active
+			 */
+			r = is_data_available_for_read(nqx_dev);
+			if (r <= 0) {
+				nqx_disable_irq(nqx_dev);
+				nqx_dev->cold_reset_rsp_pending = false;
+				return nqx_dev->cold_reset_status;
+			}
+			read_cold_reset_rsp(nqx_dev, false, NULL);
+			nqx_dev->cold_reset_rsp_pending = false;
+		}
+		r = nqx_dev->cold_reset_status;
+	} else if (arg == ESE_POWER_STATE) {
 		// eSE power state
 		r = gpio_get_value(nqx_dev->en_gpio);
 	}
@@ -556,7 +690,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 	int r = 0;
 	struct nqx_dev *nqx_dev = filp->private_data;
 
-	if (arg == 0) {
+	if (arg == NFC_POWER_OFF) {
 		/*
 		 * We are attempting a hardware reset so let us disable
 		 * interrupts to avoid spurious notifications to upper
@@ -590,7 +724,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 				dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
 		}
 		nqx_dev->nfc_ven_enabled = false;
-	} else if (arg == 1) {
+	} else if (arg == NFC_POWER_ON) {
 		nqx_enable_irq(nqx_dev);
 		dev_dbg(&nqx_dev->client->dev,
 			"gpio_set_value enable: %s: info: %p\n",
@@ -607,7 +741,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 				dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
 		}
 		nqx_dev->nfc_ven_enabled = true;
-	} else if (arg == 2) {
+	} else if (arg == NFC_FW_DWL_VEN_TOGGLE) {
 		/*
 		 * We are switching to Download Mode, toggle the enable pin
 		 * in order to set the NFCC in the new mode
@@ -629,7 +763,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 		usleep_range(10000, 10100);
 		gpio_set_value(nqx_dev->en_gpio, 1);
 		usleep_range(10000, 10100);
-	} else if (arg == 4) {
+	} else if (arg == NFC_FW_DWL_HIGH) {
 		/*
 		 * Setting firmware download gpio to HIGH for SN100U
 		 * before FW download start
@@ -641,7 +775,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 		} else
 			dev_err(&nqx_dev->client->dev,
 				"firm_gpio is invalid\n");
-	} else if (arg == 6) {
+	} else if (arg == NFC_FW_DWL_LOW) {
 		/*
 		 * Setting firmware download gpio to LOW for SN100U
 		 * FW download finished
@@ -654,6 +788,16 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 			dev_err(&nqx_dev->client->dev,
 				"firm_gpio is invalid\n");
 		}
+	} else if (arg == NFC_ENABLE) {
+		/*
+		 * Setting flag true when NFC is enabled
+		 */
+		nqx_dev->nfc_enabled = true;
+	} else if (arg == NFC_DISABLE) {
+		/*
+		 * Setting flag false when NFC is disabled
+		 */
+		nqx_dev->nfc_enabled = false;
 	} else {
 		r = -ENOIOCTLCMD;
 	}
@@ -808,7 +952,7 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 	}
 
 	nci_reset_ntf = kzalloc(NCI_RESET_NTF_LEN + 1,  GFP_DMA | GFP_KERNEL);
-	if (!nci_reset_rsp) {
+	if (!nci_reset_ntf) {
 		ret = -ENOMEM;
 		goto done;
 	}
@@ -897,9 +1041,8 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 		}
 		goto err_nfcc_reset_failed;
 	}
-	nqx_enable_irq(nqx_dev);
-	ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
-		!nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+
+	ret = is_data_available_for_read(nqx_dev);
 	if (ret <= 0) {
 		nqx_disable_irq(nqx_dev);
 		goto err_nfcc_hw_check;
@@ -915,9 +1058,8 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 			goto reset_enable_gpio;
 		goto err_nfcc_hw_check;
 	}
-	nqx_enable_irq(nqx_dev);
-	ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
-		!nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+
+	ret = is_data_available_for_read(nqx_dev);
 	if (ret <= 0) {
 		nqx_disable_irq(nqx_dev);
 		goto err_nfcc_hw_check;
@@ -1280,6 +1422,7 @@ static int nqx_probe(struct i2c_client *client,
 
 	/* init mutex and queues */
 	init_waitqueue_head(&nqx_dev->read_wq);
+	init_waitqueue_head(&nqx_dev->cold_reset_read_wq);
 	mutex_init(&nqx_dev->read_mutex);
 	mutex_init(&nqx_dev->dev_ref_mutex);
 	spin_lock_init(&nqx_dev->irq_enabled_lock);
@@ -1362,6 +1505,8 @@ static int nqx_probe(struct i2c_client *client,
 	device_set_wakeup_capable(&client->dev, true);
 	i2c_set_clientdata(client, nqx_dev);
 	nqx_dev->irq_wake_up = false;
+	nqx_dev->cold_reset_rsp_pending = false;
+	nqx_dev->nfc_enabled = false;
 
 	dev_err(&client->dev,
 	"%s: probing NFCC NQxxx exited successfully\n",
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
index 12b0737..8d807ec 100644
--- a/drivers/nfc/nq-nci.h
+++ b/drivers/nfc/nq-nci.h
@@ -24,12 +24,65 @@
 #define SET_EMULATOR_TEST_POINT		_IOW(0xE9, 0x05, unsigned int)
 #define NFCC_INITIAL_CORE_RESET_NTF	_IOW(0xE9, 0x10, unsigned int)
 
+#define DEV_COUNT			1
+#define DEVICE_NAME			"nq-nci"
+#define CLASS_NAME			"nqx"
+#define MAX_BUFFER_SIZE			(320)
+#define WAKEUP_SRC_TIMEOUT		(2000)
+#define NCI_HEADER_LEN			3
+#define NCI_PAYLOAD_IDX			3
+#define MAX_RETRY_COUNT			3
+#define NCI_RESET_CMD_LEN		4
+#define NCI_RESET_RSP_LEN		4
+#define NCI_RESET_NTF_LEN		13
+#define NCI_GET_VERSION_CMD_LEN		8
+#define NCI_GET_VERSION_RSP_LEN		12
+#define MAX_IRQ_WAIT_TIME		(90)	/* in ms */
+#define COLD_RESET_CMD_LEN		3
+#define COLD_RESET_RSP_LEN		4
+#define COLD_RESET_CMD_GID		0x2F
+#define COLD_RESET_CMD_PAYLOAD_LEN	0x00
+#define COLD_RESET_RSP_GID		0x4F
+#define COLD_RESET_OID			0x1E
+
 #define NFC_RX_BUFFER_CNT_START		(0x0)
 #define PAYLOAD_HEADER_LENGTH		(0x3)
 #define PAYLOAD_LENGTH_MAX		(256)
 #define BYTE				(0x8)
 #define NCI_IDENTIFIER			(0x10)
 
+enum ese_ioctl_request {
+	/* eSE POWER ON */
+	ESE_POWER_ON = 0,
+	/* eSE POWER OFF */
+	ESE_POWER_OFF,
+	/* eSE COLD RESET */
+	ESE_COLD_RESET,
+	/* eSE POWER STATE */
+	ESE_POWER_STATE
+};
+
+enum nfcc_ioctl_request {
+	/* NFC disable request with VEN LOW */
+	NFC_POWER_OFF = 0,
+	/* NFC enable request with VEN Toggle */
+	NFC_POWER_ON,
+	/* firmware download request with VEN Toggle */
+	NFC_FW_DWL_VEN_TOGGLE,
+	/* ISO reset request */
+	NFC_ISO_RESET,
+	/* request for firmware download gpio HIGH */
+	NFC_FW_DWL_HIGH,
+	/* hard reset request */
+	NFC_HARD_RESET,
+	/* request for firmware download gpio LOW */
+	NFC_FW_DWL_LOW,
+	/* NFC enable without VEN gpio modification */
+	NFC_ENABLE,
+	/* NFC disable without VEN gpio modification */
+	NFC_DISABLE
+};
+
 enum nfcc_initial_core_reset_ntf {
 	TIMEDOUT_INITIAL_CORE_RESET_NTF = 0, /* 0*/
 	ARRIVED_INITIAL_CORE_RESET_NTF, /* 1 */
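The constants and enums above replace bare magic numbers in the driver's ioctl paths. A hypothetical userspace sketch of the resulting call; ESE_SET_PWR is assumed to be the driver's eSE power ioctl, and error handling is elided:

#include <sys/ioctl.h>

static int ese_cold_reset(int fd)
{
	/* formerly ioctl(fd, ESE_SET_PWR, 3) with a bare constant */
	return ioctl(fd, ESE_SET_PWR, ESE_COLD_RESET);
}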
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index f55d082..5d6e7e9 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
 
 		transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
 					    skb->len - 2, GFP_KERNEL);
+		if (!transaction)
+			return -ENOMEM;
 
 		transaction->aid_len = skb->data[1];
 		memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index acdce23..569475a 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -335,6 +335,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
 
 		transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
 						   skb->len - 2, GFP_KERNEL);
+		if (!transaction)
+			return -ENOMEM;
 
 		transaction->aid_len = skb->data[1];
 		memcpy(transaction->aid, &skb->data[2],
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 260248fb..a11e210 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -20,11 +20,6 @@ module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
 	"turn on native support for multiple controllers per subsystem");
 
-inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
-{
-	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
-}
-
 /*
  * If multipathing is enabled we need to always use the subsystem instance
  * number for numbering our devices to avoid conflicts between subsystems that
@@ -516,7 +511,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
 	int error;
 
-	if (!nvme_ctrl_use_ana(ctrl))
+	/* check if multipath is enabled and we have the capability */
+	if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
 		return 0;
 
 	ctrl->anacap = id->anacap;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index e82cdae..d5e29b5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -464,7 +464,11 @@ extern const struct attribute_group nvme_ns_id_attr_group;
 extern const struct block_device_operations nvme_ns_head_ops;
 
 #ifdef CONFIG_NVME_MULTIPATH
-bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+	return ctrl->ana_log_buf != NULL;
+}
+
 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
 			struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
index 88253ff..de03667 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
@@ -79,6 +79,7 @@
 #define UFS_PHY_RX_HSGEAR_CAPABILITY		PHY_OFF(0xB4)
 #define UFS_PHY_RX_MIN_HIBERN8_TIME		PHY_OFF(0x150)
 #define UFS_PHY_BIST_FIXED_PAT_CTRL		PHY_OFF(0x60)
+#define UFS_PHY_RX_SIGDET_CTRL1			PHY_OFF(0x154)
 
 /* UFS PHY TX registers */
 #define QSERDES_TX0_PWM_GEAR_1_DIVIDER_BAND0_1	TX_OFF(0, 0x168)
@@ -270,6 +271,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HS_GEAR_BAND, 0x06),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HSGEAR_CAPABILITY, 0x03),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_HSGEAR_CAPABILITY, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL1, 0x0E),
 };
 
 static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 537e8cf..cac0034 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -3386,8 +3386,18 @@ int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
 		}
 	}
 
-	/* TODO: Increase escape buffer size if we hit this */
-	GSIERR("user_data is full\n");
+	/* Go over original userdata when escape buffer is full (costly) */
+	GSIDBG("escape buffer is full\n");
+	for (i = 0; i < end; i++) {
+		if (!ctx->user_data[i].valid) {
+			ctx->user_data[i].valid = true;
+			return i;
+		}
+	}
+
+	/* Everything is full (possibly a stall) */
+	GSIERR("both userdata array and escape buffer are full\n");
+	BUG();
 	return 0xFFFF;
 }
 
@@ -3424,7 +3434,7 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
 
 	/* write the TRE to ring */
 	*tre_gci_ptr = gci_tre;
-	ctx->user_data[idx].p = xfer->xfer_user_data;
+	ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;
 
 	return 0;
 }
@@ -3730,7 +3740,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 		curr = GSI_CHAN_MODE_CALLBACK;
 
 	if (unlikely(mode == curr)) {
-		GSIERR("already in requested mode %u chan_hdl=%lu\n",
+		GSIDBG("already in requested mode %u chan_hdl=%lu\n",
 				curr, chan_hdl);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -3741,7 +3751,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
 			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
 		atomic_set(&ctx->poll_mode, mode);
-		if (ctx->props.prot == GSI_CHAN_PROT_GCI)
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
 			atomic_set(&ctx->evtr->chan->poll_mode, mode);
 		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
 			ctx->evtr->id, mode);
@@ -3751,7 +3761,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	if (curr == GSI_CHAN_MODE_POLL &&
 			mode == GSI_CHAN_MODE_CALLBACK) {
 		atomic_set(&ctx->poll_mode, mode);
-		if (ctx->props.prot == GSI_CHAN_PROT_GCI)
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
 			atomic_set(&ctx->evtr->chan->poll_mode, mode);
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
 		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
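Earlier in this file, __gsi_get_gci_cookie() gains a second-chance scan: once the escape buffer is exhausted it walks the original user_data range before declaring a stall. A minimal sketch of that two-level search, with illustrative types:

#include <linux/types.h>

struct example_slot {
	bool valid;
};

/* Prefer the escape region, then fall back to the primary array;
 * -1 means everything is genuinely full.
 */
static int example_get_cookie(struct example_slot *slots,
			      int primary_len, int total_len)
{
	int i;

	for (i = primary_len; i < total_len; i++)	/* escape buffer */
		if (!slots[i].valid) {
			slots[i].valid = true;
			return i;
		}

	for (i = 0; i < primary_len; i++)		/* costly fallback */
		if (!slots[i].valid) {
			slots[i].valid = true;
			return i;
		}

	return -1;
}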
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 28a2fc6..5b41486 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1997,6 +1997,43 @@ int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
 EXPORT_SYMBOL(ipa_get_wdi_stats);
 
 /**
+ * ipa_uc_bw_monitor() - start uc bw monitoring
+ * @info:	[inout] set info populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_bw_monitor, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_bw_monitor);
+
+/**
+ * ipa_set_wlan_tx_info() - set WDI statistics from uc
+ * @info:	[inout] set info populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_wlan_tx_info, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_wlan_tx_info);
+
+/**
  * ipa_get_smem_restr_bytes()- Return IPA smem restricted bytes
  *
  * Return value: u16 - number of IPA smem restricted bytes
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 62457e2..553616f 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -212,6 +212,10 @@ struct ipa_api_controller {
 
 	int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats);
 
+	int (*ipa_uc_bw_monitor)(struct ipa_wdi_bw_info *info);
+
+	int (*ipa_set_wlan_tx_info)(struct ipa_wdi_tx_info *info);
+
 	u16 (*ipa_get_smem_restr_bytes)(void);
 
 	int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
index 90082fc..0c8bb24 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
@@ -760,3 +760,15 @@ int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
 	return ipa_get_wdi_stats(stats);
 }
 EXPORT_SYMBOL(ipa_wdi_get_stats);
+
+int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return ipa_uc_bw_monitor(info);
+}
+EXPORT_SYMBOL(ipa_wdi_bw_monitor);
+
+int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
+{
+	return ipa_set_wlan_tx_info(info);
+}
+EXPORT_SYMBOL(ipa_wdi_sw_stats);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 7bc80ac..c7cfcce 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -132,6 +132,7 @@ static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg);
 static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg);
 static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg);
 static int ipa3_ioctl_fnr_counter_query(unsigned long arg);
+static int ipa3_ioctl_fnr_counter_set(unsigned long arg);
 
 static struct ipa3_plat_drv_res ipa3_res = {0, };
 
@@ -1480,6 +1481,43 @@ static int ipa3_ioctl_fnr_counter_query(unsigned long arg)
 	return retval;
 }
 
+static int ipa3_ioctl_fnr_counter_set(unsigned long arg)
+{
+	u8 header[128] = { 0 };
+	uint8_t value;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_fnr_index_info))) {
+		IPAERR_RL("copy_from_user fails\n");
+		return -EFAULT;
+	}
+
+	value = ((struct ipa_ioc_fnr_index_info *)
+		header)->hw_counter_offset;
+	if (value <= 0 || value > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("hw_counter_offset failed: num %d\n",
+			value);
+		return -EPERM;
+	}
+
+	ipa3_ctx->fnr_info.hw_counter_offset = value;
+
+	value = ((struct ipa_ioc_fnr_index_info *)
+		header)->sw_counter_offset;
+	if (value <= 0 || value > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("sw_counter_offset failed: num %d\n",
+			value);
+		return -EPERM;
+	}
+	ipa3_ctx->fnr_info.sw_counter_offset = value;
+	/* reset again when IPACM cleans up */
+	ipa3_ctx->fnr_info.valid = true;
+	IPADBG("fnr_info hw=%d, sw=%d\n",
+		ipa3_ctx->fnr_info.hw_counter_offset,
+		ipa3_ctx->fnr_info.sw_counter_offset);
+	return 0;
+}
+
 static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int retval = 0;
@@ -2648,6 +2686,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		retval = ipa3_ioctl_fnr_counter_query(arg);
 		break;
 
+	case IPA_IOC_SET_FNR_COUNTER_INFO:
+		retval = ipa3_ioctl_fnr_counter_set(arg);
+		break;
+
 	case IPA_IOC_WIGIG_FST_SWITCH:
 		IPADBG("Got IPA_IOCTL_WIGIG_FST_SWITCH\n");
 		if (copy_from_user(&fst_switch, (const void __user *)arg,
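From userspace the new ioctl is reached through the IPA char device; a minimal sketch, assuming the usual /dev/ipa node name and that struct ipa_ioc_fnr_index_info carries exactly the two offsets the handler reads:

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/msm_ipa.h>

	static int set_fnr_offsets(uint8_t hw_ofst, uint8_t sw_ofst)
	{
		struct ipa_ioc_fnr_index_info info = {
			.hw_counter_offset = hw_ofst, /* 1..IPA_MAX_FLT_RT_CNT_INDEX */
			.sw_counter_offset = sw_ofst,
		};
		int fd = open("/dev/ipa", O_RDWR); /* node name is an assumption */
		int rc;

		if (fd < 0)
			return -1;
		rc = ioctl(fd, IPA_IOC_SET_FNR_COUNTER_INFO, &info);
		close(fd);
		return rc;
	}
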
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 58c924f..0222b28 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1412,6 +1412,12 @@ struct ipa3_uc_ctx {
 	u32 rdy_comp_ring_size;
 	u32 *rdy_ring_rp_va;
 	u32 *rdy_comp_ring_wp_va;
+	bool uc_event_ring_valid;
+	struct ipa_mem_buffer event_ring;
+	u32 ering_wp_local;
+	u32 ering_rp_local;
+	u32 ering_wp;
+	u32 ering_rp;
 };
 
 /**
@@ -1501,6 +1507,17 @@ struct ipa3cm_client_info {
 	bool uplink;
 };
 
+/**
+ * struct ipacm_fnr_info - the FnR counter info indicated from IPACM
+ * @valid: true once IPACM has configured the counter offsets
+ * @hw_counter_offset: start index of the hw FnR counters
+ * @sw_counter_offset: start index of the sw FnR counters
+ */
+struct ipacm_fnr_info {
+	bool valid;
+	uint8_t hw_counter_offset;
+	uint8_t sw_counter_offset;
+};
+
 struct ipa3_smp2p_info {
 	u32 out_base_id;
 	u32 in_base_id;
@@ -1893,6 +1910,7 @@ struct ipa3_context {
 	struct IpaHwOffloadStatsAllocCmdData_t
 		gsi_info[IPA_HW_PROTOCOL_MAX];
 	bool ipa_wan_skb_page;
+	struct ipacm_fnr_info fnr_info;
 };
 
 struct ipa3_plat_drv_res {
@@ -2572,6 +2590,9 @@ bool ipa3_get_client_uplink(int pipe_idx);
 int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
 
 int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
+
+int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw);
+
 /*
  * IPADMA
  */
@@ -2839,6 +2860,10 @@ int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n);
 int ipa3_uc_debug_stats_alloc(
 	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo);
 int ipa3_uc_debug_stats_dealloc(uint32_t protocol);
+int ipa3_uc_quota_monitor(uint64_t quota);
+int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info);
+int ipa3_uc_setup_event_ring(void);
+int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info);
 void ipa3_tag_destroy_imm(void *user1, int user2);
 const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 	(enum ipa_client_type client);
@@ -2897,6 +2922,8 @@ int ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query);
 
 int ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats);
 
+bool ipa_get_fnr_info(struct ipacm_fnr_info *fnr_info);
+
 u32 ipa3_get_num_pipes(void);
 struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type);
 struct iommu_domain *ipa3_get_smmu_domain(void);
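For orientation, a typical consumer of the new getter, mirroring ipa3_set_wlan_tx_info() later in this patch; hypothetical_fnr_consumer() and use_counters() are illustrative names, not part of the patch:

	static int hypothetical_fnr_consumer(void)
	{
		struct ipacm_fnr_info fnr = { 0 };

		if (!ipa_get_fnr_info(&fnr))
			return -EINVAL;	/* IPACM has not pushed offsets yet */

		/* hw/sw offsets index into the FnR stats counter table */
		use_counters(fnr.hw_counter_offset, fnr.sw_counter_offset);
		return 0;
	}
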
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index e5896ec..4e50080 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -376,12 +376,6 @@ struct ipa_mpm_mhi_driver {
 	/* General MPM mutex to protect concurrent update of MPM GSI states */
 	struct mutex mutex;
 	/*
-	 * Mutex to protect IPA clock vote/unvote to make sure IPA isn't double
-	 * devoted for concurrency scenarios such as SSR and LPM mode CB
-	 * concurrency.
-	 */
-	struct mutex lpm_mutex;
-	/*
 	 * Mutex to protect mhi_dev update/ access, for concurrency such as
 	 * 5G SSR and USB disconnect/connect.
 	 */
@@ -1357,7 +1351,6 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
 
 	ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
 
-	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
 	if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
 		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
 		/* while in modem shutdown scenarios such as SSR, no explicit
@@ -1365,10 +1358,8 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
 		 */
 		ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
 	}
-	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
 	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
 	ipa_mpm_ctx->md[mhip_idx].mhi_dev = NULL;
-	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
 	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
 	IPA_MPM_FUNC_EXIT();
 }
@@ -1399,6 +1390,18 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 		return 0;
 	}
 
+	if (!ipa_mpm_ctx->md[probe_id].init_complete) {
+		/*
+		 * SSR might be in progress; don't vote/unvote the
+		 * clocks, as that will be taken care of in remove_cb or
+		 * the subsequent probe.
+		 */
+		IPA_MPM_DBG("SSR in progress, return\n");
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return 0;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
 	IPA_MPM_ERR("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
 		vote, probe_id,
 		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt));
@@ -1409,7 +1412,6 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 		if (result) {
 			IPA_MPM_ERR("mhi_sync_get failed for probe_id %d\n",
 				result, probe_id);
-			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 			return result;
 		}
 
@@ -1423,7 +1425,6 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 			IPA_MPM_DBG("probe_id %d PCIE clock already devoted\n",
 				probe_id);
 			WARN_ON(1);
-			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 			return 0;
 		}
 		mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
@@ -1431,8 +1432,6 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
 		atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
 	}
-
-	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	return result;
 }
 
@@ -1482,11 +1481,9 @@ static int ipa_mpm_start_stop_remote_mhip_chan(
 		return ret;
 	}
 
-	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	/* For error state, expect modem SSR to recover from error */
 	if (ipa_mpm_ctx->md[probe_id].remote_state == MPM_MHIP_REMOTE_ERR) {
 		IPA_MPM_ERR("Remote channels in err state for %d\n", probe_id);
-		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		return -EFAULT;
 	}
 
@@ -1497,12 +1494,14 @@ static int ipa_mpm_start_stop_remote_mhip_chan(
 				probe_id);
 		} else {
 			ret = mhi_resume_transfer(mhi_dev);
+			mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 			if (ret)
 				ipa_mpm_ctx->md[probe_id].remote_state =
 							MPM_MHIP_REMOTE_ERR;
 			else
 				ipa_mpm_ctx->md[probe_id].remote_state =
 							MPM_MHIP_REMOTE_START;
+			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		}
 	} else {
 		if (ipa_mpm_ctx->md[probe_id].remote_state ==
@@ -1511,15 +1510,16 @@ static int ipa_mpm_start_stop_remote_mhip_chan(
 					probe_id);
 		} else {
 			ret = mhi_pause_transfer(mhi_dev);
+			mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 			if (ret)
 				ipa_mpm_ctx->md[probe_id].remote_state =
 							MPM_MHIP_REMOTE_ERR;
 			else
 				ipa_mpm_ctx->md[probe_id].remote_state =
 							MPM_MHIP_REMOTE_STOP;
+			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		}
 	}
-	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	return ret;
 }
 
@@ -1563,6 +1563,15 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 		IPA_MPM_ERR("fail to get EP# for idx %d\n", ipa_ep_idx);
 		return MHIP_STATUS_EP_NOT_FOUND;
 	}
+
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	if (!ipa_mpm_ctx->md[probe_id].init_complete) {
+		IPA_MPM_ERR("MHIP probe %d not initialized\n", probe_id);
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return MHIP_STATUS_EP_NOT_READY;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
 	ep = &ipa3_ctx->ep[ipa_ep_idx];
 
 	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
@@ -1951,10 +1960,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	ipa_mpm_ctx->md[probe_id].remote_state = MPM_MHIP_REMOTE_STOP;
 	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
-	mutex_lock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
 	ipa_mpm_vote_unvote_ipa_clk(CLK_ON, probe_id);
 	ipa_mpm_ctx->md[probe_id].in_lpm = false;
-	mutex_unlock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
 	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);
 
 	/*
@@ -2089,8 +2096,10 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		 * to MHI driver. remove_cb will be called eventually when
 		 * Device side comes from where pending cleanup happens.
 		 */
+		mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		atomic_inc(&ipa_mpm_ctx->probe_cnt);
-		ipa_mpm_ctx->md[probe_id].init_complete = true;
+		ipa_mpm_ctx->md[probe_id].init_complete = false;
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		IPA_MPM_FUNC_EXIT();
 		return 0;
 	}
@@ -2271,8 +2280,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	}
 
 	atomic_inc(&ipa_mpm_ctx->probe_cnt);
-	ipa_mpm_ctx->md[probe_id].init_complete = true;
-
 	/* Check if ODL pipe is connected to MHIP DPL pipe before probe */
 	if (probe_id == IPA_MPM_MHIP_CH_ID_2 &&
 		ipa3_is_odl_connected()) {
@@ -2280,7 +2287,9 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
 			IPA_CLIENT_USB_DPL_CONS, false);
 	}
-
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	ipa_mpm_ctx->md[probe_id].init_complete = true;
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	IPA_MPM_FUNC_EXIT();
 	return 0;
 
@@ -2349,6 +2358,11 @@ static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
 	}
 
 	IPA_MPM_DBG("remove_cb for mhip_idx = %d", mhip_idx);
+
+	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
+	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+
 	ipa_mpm_mhip_shutdown(mhip_idx);
 
 	atomic_dec(&ipa_mpm_ctx->probe_cnt);
@@ -2387,7 +2401,19 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
 		return;
 	}
 
-	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
+	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	if (!ipa_mpm_ctx->md[mhip_idx].init_complete) {
+		/*
+		 * SSR might be in progress; don't vote/unvote the IPA
+		 * clocks, as that will be taken care of in remove_cb or
+		 * the subsequent probe.
+		 */
+		IPA_MPM_DBG("SSR in progress, return\n");
+		mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+		return;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+
 	switch (mhi_cb) {
 	case MHI_CB_IDLE:
 		break;
@@ -2424,7 +2450,6 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
 		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
 		break;
 	}
-	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
 }
 
 static void ipa_mpm_mhip_map_prot(enum ipa_usb_teth_prot prot,
@@ -2778,7 +2803,6 @@ static int ipa_mpm_probe(struct platform_device *pdev)
 	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
 		mutex_init(&ipa_mpm_ctx->md[i].mutex);
 		mutex_init(&ipa_mpm_ctx->md[i].mhi_mutex);
-		mutex_init(&ipa_mpm_ctx->md[i].lpm_mutex);
 	}
 
 	ipa_mpm_ctx->dev_info.pdev = pdev;
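The lpm_mutex removed above is replaced by gating on init_complete under mhi_mutex; a minimal model of the pattern (mpm_channel_ready() is a hypothetical helper, not part of this patch):

	static bool mpm_channel_ready(int probe_id)
	{
		bool ready;

		mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		ready = ipa_mpm_ctx->md[probe_id].init_complete;
		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		return ready;
	}

	/* callers bail out early during SSR instead of serializing the
	 * whole vote/unvote path behind a dedicated lock:
	 *
	 *	if (!mpm_channel_ready(probe_id))
	 *		return 0;  (remove_cb/next probe rebalances clocks)
	 */
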
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 5df8dcb..851ff62 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -14,7 +14,7 @@
 
 #define IPA_UC_DBG_STATS_GET_PROT_ID(x) (0xff & ((x) >> 24))
 #define IPA_UC_DBG_STATS_GET_OFFSET(x) (0x00ffffff & (x))
-
+#define IPA_UC_EVENT_RING_SIZE 10
 /**
  * Mailbox register to Interrupt HWP for CPU cmd
  * Usage of IPA_UC_MAILBOX_m_n doorbell instead of IPA_IRQ_EE_UC_0
@@ -24,6 +24,11 @@
 #define IPA_CPU_2_HW_CMD_MBOX_m          0
 #define IPA_CPU_2_HW_CMD_MBOX_n         23
 
+#define IPA_UC_ERING_m 0
+#define IPA_UC_ERING_n_r 1
+#define IPA_UC_ERING_n_w 0
+#define IPA_UC_MON_INTERVAL 5
+
 /**
  * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU
  * IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
@@ -39,6 +44,7 @@
  * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
  * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
  * IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO: Command to store remote IPA Info
+ * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING : Command to set up the event ring
  */
 enum ipa3_cpu_2_hw_commands {
 	IPA_CPU_2_HW_CMD_NO_OP                     =
@@ -65,6 +71,8 @@ enum ipa3_cpu_2_hw_commands {
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10),
 	IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO           =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 11),
+	IPA_CPU_2_HW_CMD_SETUP_EVENT_RING          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 12),
 };
 
 /**
@@ -161,6 +169,23 @@ union IpaHwChkChEmptyCmdData_t {
 	u32 raw32b;
 } __packed;
 
+struct IpaSetupEventRingCmdParams_t {
+	u32 ring_base_pa;
+	u32 ring_base_pa_hi;
+	u32 ring_size; /* in elements, currently 10 */
+} __packed;
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING command. Parameters are
+ * sent as 32b immediate parameters.
+ */
+union IpaSetupEventRingCmdData_t {
+	struct IpaSetupEventRingCmdParams_t event;
+	u32 raw32b[6]; /* uc-internal */
+} __packed;
+
 
 /**
  * Structure holding the parameters for IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO
@@ -314,6 +339,63 @@ static void ipa3_log_evt_hdlr(void)
 	ipa3_ctx->uc_ctx.uc_event_top_ofst = 0;
 }
 
+static void ipa3_event_ring_hdlr(void)
+{
+	u32 ering_rp, offset;
+	void *rp_va;
+	struct ipa_inform_wlan_bw bw_info;
+	struct eventElement_t *e_b = NULL, *e_q = NULL;
+	int mul = 0;
+
+	ering_rp = ipahal_read_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_r);
+	offset = sizeof(struct eventElement_t);
+	ipa3_ctx->uc_ctx.ering_rp = ering_rp;
+
+	while (ipa3_ctx->uc_ctx.ering_rp_local != ering_rp) {
+		rp_va = ipa3_ctx->uc_ctx.event_ring.base +
+			ipa3_ctx->uc_ctx.ering_rp_local;
+
+		if (((struct eventElement_t *) rp_va)->Opcode == BW_NOTIFY) {
+			e_b = ((struct eventElement_t *) rp_va);
+			IPADBG("prot(%d), index (%d) throughput (%lu)\n",
+			e_b->Protocol,
+			e_b->Value.bw_param.ThresholdIndex,
+			e_b->Value.bw_param.throughput);
+
+			memset(&bw_info, 0, sizeof(struct ipa_inform_wlan_bw));
+			bw_info.index =
+				e_b->Value.bw_param.ThresholdIndex;
+			mul = 1000 / IPA_UC_MON_INTERVAL;
+			bw_info.throughput =
+				e_b->Value.bw_param.throughput*mul;
+			if (ipa3_inform_wlan_bw(&bw_info))
+				IPAERR_RL("failed on index %d to wlan\n",
+				bw_info.index);
+		} else if (((struct eventElement_t *) rp_va)->Opcode
+			== QUOTA_NOTIFY) {
+			e_q = ((struct eventElement_t *) rp_va);
+			IPADBG("got quota-notify %d reach(%d) usage (%lu)\n",
+			e_q->Protocol,
+			e_q->Value.quota_param.ThreasholdReached,
+			e_q->Value.quota_param.usage);
+			if (ipa3_broadcast_wdi_quota_reach_ind(0,
+				e_q->Value.quota_param.usage))
+				IPAERR_RL("failed on quota_reach for %d\n",
+				e_q->Protocol);
+		}
+		ipa3_ctx->uc_ctx.ering_rp_local += offset;
+		ipa3_ctx->uc_ctx.ering_rp_local %=
+			ipa3_ctx->uc_ctx.event_ring.size;
+		/* update wp */
+		ipa3_ctx->uc_ctx.ering_wp_local += offset;
+		ipa3_ctx->uc_ctx.ering_wp_local %=
+			ipa3_ctx->uc_ctx.event_ring.size;
+		ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, IPA_UC_ERING_m,
+			IPA_UC_ERING_n_w, ipa3_ctx->uc_ctx.ering_wp_local);
+	}
+}
+
 /**
  * ipa3_uc_state_check() - Check the status of the uC interface
  *
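The handler above chases the uC-owned read pointer one element at a time and advances the local write pointer in lockstep, so the uC always sees a fully credited ring. A small model of the wrap arithmetic (ring_advance() is a hypothetical helper, not part of the patch):

	static u32 ring_advance(u32 idx, u32 ring_bytes)
	{
		/* idx is a byte offset into the event ring buffer */
		return (idx + sizeof(struct eventElement_t)) % ring_bytes;
	}

	/* e.g. in the handler loop:
	 *	rp_local = ring_advance(rp_local, event_ring.size);
	 *	wp_local = ring_advance(wp_local, event_ring.size);
	 */
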
@@ -441,6 +523,11 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
 		IPADBG("uC evt log info ofst=0x%x\n",
 			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
 		ipa3_log_evt_hdlr();
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVNT_RING_NOTIFY) {
+		IPADBG("uC evt log info ofst=0x%x\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
+		ipa3_event_ring_hdlr();
 	} else {
 		IPADBG("unsupported uC evt opcode=%u\n",
 				ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
@@ -1099,3 +1186,254 @@ int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
 	IPADBG("exit\n");
 	return result;
 }
+
+int ipa3_uc_setup_event_ring(void)
+{
+	int res = 0;
+	struct ipa_mem_buffer cmd, *ring;
+	union IpaSetupEventRingCmdData_t *ring_info;
+
+	ring = &ipa3_ctx->uc_ctx.event_ring;
+	/* Allocate event ring */
+	ring->size = sizeof(struct eventElement_t) * IPA_UC_EVENT_RING_SIZE;
+	ring->base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring->size,
+		&ring->phys_base, GFP_KERNEL);
+	if (ring->base == NULL)
+		return -ENOMEM;
+
+	cmd.size = sizeof(*ring_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		dma_free_coherent(ipa3_ctx->uc_pdev, ring->size,
+			ring->base, ring->phys_base);
+		return -ENOMEM;
+	}
+
+	ring_info = (union IpaSetupEventRingCmdData_t *) cmd.base;
+	ring_info->event.ring_base_pa = (u32) (ring->phys_base & 0xFFFFFFFF);
+	ring_info->event.ring_base_pa_hi =
+		(u32) ((ring->phys_base & 0xFFFFFFFF00000000) >> 32);
+	ring_info->event.ring_size = IPA_UC_EVENT_RING_SIZE;
+
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_SETUP_EVENT_RING, 0,
+		false, 10 * HZ);
+
+	if (res) {
+		IPAERR(" faile to setup event ring 0x%x 0x%x, size %d\n",
+			ring_info->event.ring_base_pa,
+			ring_info->event.ring_base_pa_hi,
+			ring_info->event.ring_size);
+		goto free_cmd;
+	}
+
+	ipa3_ctx->uc_ctx.uc_event_ring_valid = true;
+	/* write wp/rp values */
+	ipa3_ctx->uc_ctx.ering_rp_local = 0;
+	ipa3_ctx->uc_ctx.ering_wp_local =
+		ring->size - sizeof(struct eventElement_t);
+	ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_r, 0);
+	ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_w,
+			ipa3_ctx->uc_ctx.ering_wp_local);
+	ipa3_ctx->uc_ctx.ering_wp =
+		ipa3_ctx->uc_ctx.ering_wp_local;
+	ipa3_ctx->uc_ctx.ering_rp = 0;
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size, cmd.base, cmd.phys_base);
+	return res;
+}
+
+int ipa3_uc_quota_monitor(uint64_t quota)
+{
+	int ind, res = 0;
+	struct ipa_mem_buffer cmd;
+	struct IpaQuotaMonitoring_t *quota_info;
+
+	cmd.size = sizeof(*quota_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL)
+		return -ENOMEM;
+
+	quota_info = (struct IpaQuotaMonitoring_t *)cmd.base;
+	quota_info->protocol = IPA_HW_PROTOCOL_WDI3;
+	quota_info->params.WdiQM.Quota = quota;
+	quota_info->params.WdiQM.info.Num = 4;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_HW - 1;
+	quota_info->params.WdiQM.info.Offset[0] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_ALL - 1;
+	quota_info->params.WdiQM.info.Offset[1] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_HW_CACHE - 1;
+	quota_info->params.WdiQM.info.Offset[2] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_WLAN_TX - 1;
+	quota_info->params.WdiQM.info.Offset[3] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	quota_info->params.WdiQM.info.Interval =
+		IPA_UC_MON_INTERVAL;
+
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_QUOTA_MONITORING,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+
+	if (res) {
+		IPAERR(" faile to set quota %d, number offset %d\n",
+			quota_info->params.WdiQM.Quota,
+			quota_info->params.WdiQM.info.Num);
+		goto free_cmd;
+	}
+
+	IPADBG(" offest1 %d offest2 %d offest3 %d offest4 %d\n",
+			quota_info->params.WdiQM.info.Offset[0],
+			quota_info->params.WdiQM.info.Offset[1],
+			quota_info->params.WdiQM.info.Offset[2],
+			quota_info->params.WdiQM.info.Offset[3]);
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+
+	return res;
+}
+
+int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	int i, ind, res = 0;
+	struct ipa_mem_buffer cmd;
+	struct IpaBwMonitoring_t *bw_info;
+
+	if (!info)
+		return -EINVAL;
+
+	/* check max entry */
+	if (info->num > BW_MONITORING_MAX_THRESHOLD) {
+		IPAERR("%d, support max %d bw monitor\n", info->num,
+		BW_MONITORING_MAX_THRESHOLD);
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*bw_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL)
+		return -ENOMEM;
+
+	bw_info = (struct IpaBwMonitoring_t *)cmd.base;
+	bw_info->protocol = IPA_HW_PROTOCOL_WDI3;
+	bw_info->params.WdiBw.NumThresh = info->num;
+	bw_info->params.WdiBw.Stop = info->stop;
+	IPADBG("stop bw-monitor? %d\n", bw_info->params.WdiBw.Stop);
+
+	for (i = 0; i < info->num; i++) {
+		bw_info->params.WdiBw.BwThreshold[i] = info->threshold[i];
+		IPADBG("%d-st, %lu\n", i, bw_info->params.WdiBw.BwThreshold[i]);
+	}
+
+	bw_info->params.WdiBw.info.Num = 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_HW - 1;
+	bw_info->params.WdiBw.info.Offset[0] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_HW - 1;
+	bw_info->params.WdiBw.info.Offset[1] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_ALL - 1;
+	bw_info->params.WdiBw.info.Offset[2] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_ALL - 1;
+	bw_info->params.WdiBw.info.Offset[3] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_HW_CACHE - 1;
+	bw_info->params.WdiBw.info.Offset[4] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		DL_HW_CACHE - 1;
+	bw_info->params.WdiBw.info.Offset[5] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_WLAN_TX - 1;
+	bw_info->params.WdiBw.info.Offset[6] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		DL_WLAN_TX - 1;
+	bw_info->params.WdiBw.info.Offset[7] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	bw_info->params.WdiBw.info.Interval =
+		IPA_UC_MON_INTERVAL;
+
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_BW_MONITORING,
+			IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+			false, 10 * HZ);
+
+	if (res) {
+		IPAERR(" faile to set bw %d level with %d coutners\n",
+			bw_info->params.WdiBw.NumThresh,
+			bw_info->params.WdiBw.info.Num);
+		goto free_cmd;
+	}
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+
+	return res;
+}
+
+int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+	struct ipa_flt_rt_stats stats;
+	struct ipacm_fnr_info fnr_info;
+
+	memset(&fnr_info, 0, sizeof(struct ipacm_fnr_info));
+	if (!ipa_get_fnr_info(&fnr_info)) {
+		IPAERR("FNR counter haven't configured\n");
+		return -EINVAL;
+	}
+
+	/* update sw counters */
+	memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+	stats.num_bytes = info->sta_tx;
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		UL_WLAN_TX, stats)) {
+		IPAERR("Failed to set stats to ul_wlan_tx %d\n",
+			fnr_info.sw_counter_offset + UL_WLAN_TX);
+		return -EINVAL;
+	}
+
+	stats.num_bytes = info->ap_tx;
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		DL_WLAN_TX, stats)) {
+		IPAERR("Failed to set stats to dl_wlan_tx %d\n",
+			fnr_info.sw_counter_offset + DL_WLAN_TX);
+		return -EINVAL;
+	}
+
+	return 0;
+}
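The recurring "+ 8" in the Offset[] arithmetic above deserves a note: assuming the uapi layout of struct ipa_flt_rt_stats (two 32-bit packet counters followed by a 64-bit byte counter), it selects the num_bytes field of the indexed counter. A compile-time check would make that assumption explicit; this is a sketch, not part of the patch:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	static inline void fnr_offset_sanity_check(void)
	{
		/* holds iff num_bytes sits after two u32 packet counters */
		BUILD_BUG_ON(offsetof(struct ipa_flt_rt_stats, num_bytes) != 8);
	}
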
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 4d296ec..865b436 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -30,6 +30,8 @@
 #define MAX_MHIP_CHANNELS 4
 #define MAX_USB_CHANNELS 2
 
+#define BW_QUOTA_MONITORING_MAX_ADDR_OFFSET 8
+#define BW_MONITORING_MAX_THRESHOLD 3
 /**
  *  @brief   Enum value determined based on the feature it
  *           corresponds to
@@ -98,6 +100,7 @@ enum ipa4_hw_protocol {
  * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
  *  device
  * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ * @IPA_HW_2_CPU_EVNT_RING_NOTIFY : Event to notify APPS of new event-ring entries
  */
 enum ipa3_hw_2_cpu_events {
 	IPA_HW_2_CPU_EVENT_NO_OP     =
@@ -106,6 +109,8 @@ enum ipa3_hw_2_cpu_events {
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
 	IPA_HW_2_CPU_EVENT_LOG_INFO  =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_2_CPU_EVNT_RING_NOTIFY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
 };
 
 /**
@@ -447,6 +452,8 @@ struct Ipa3HwStatsNTNInfoData_t {
  * uC stats calculation for a particular protocol
  * @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC: Command to stop the
  * uC stats calculation for a particular protocol
+ * @IPA_CPU_2_HW_CMD_QUOTA_MONITORING : Command to start the Quota monitoring
+ * @IPA_CPU_2_HW_CMD_BW_MONITORING : Command to start the BW monitoring
  */
 enum ipa_cpu_2_hw_offload_commands {
 	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP  =
@@ -461,6 +468,10 @@ enum ipa_cpu_2_hw_offload_commands {
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
 	IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+	IPA_CPU_2_HW_CMD_QUOTA_MONITORING =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+	IPA_CPU_2_HW_CMD_BW_MONITORING =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
 };
 
 /**
@@ -623,6 +634,48 @@ struct IpaHwOffloadSetUpCmdData_t {
 	union IpaHwSetUpCmd SetupCh_params;
 } __packed;
 
+struct IpaCommonMonitoringParams_t {
+	/* max 8 */
+	uint8_t  Num;
+	/* Sampling interval in ms */
+	uint8_t  Interval;
+	uint16_t Offset[BW_QUOTA_MONITORING_MAX_ADDR_OFFSET];
+} __packed; /* 18 bytes */
+
+struct IpaWdiQuotaMonitoringParams_t {
+	uint64_t Quota;
+	struct IpaCommonMonitoringParams_t info;
+} __packed;
+
+struct IpaWdiBwMonitoringParams_t {
+	uint64_t BwThreshold[BW_MONITORING_MAX_THRESHOLD];
+	struct IpaCommonMonitoringParams_t info;
+	uint8_t NumThresh;
+	/*Variable to Start Stop Bw Monitoring*/
+	uint8_t Stop;
+} __packed;
+
+union IpaQuotaMonitoringParams_t {
+	struct IpaWdiQuotaMonitoringParams_t WdiQM;
+} __packed;
+
+union IpaBwMonitoringParams_t {
+	struct IpaWdiBwMonitoringParams_t WdiBw;
+} __packed;
+
+struct IpaQuotaMonitoring_t {
+	/* indicates below union needs to be interpreted */
+	uint32_t protocol;
+	union IpaQuotaMonitoringParams_t  params;
+} __packed;
+
+struct IpaBwMonitoring_t {
+	/* indicates below union needs to be interpreted */
+	uint32_t protocol;
+	union IpaBwMonitoringParams_t   params;
+} __packed;
+
 struct IpaHwOffloadSetUpCmdData_t_v4_0 {
 	u32 protocol;
 	union IpaHwSetUpCmd SetupCh_params;
@@ -644,6 +697,46 @@ struct IpaHwOffloadCommonChCmdData_t {
 	union IpaHwCommonChCmd CommonCh_params;
 } __packed;
 
+enum EVENT_2_CPU_OPCODE {
+	BW_NOTIFY = 0x0,
+	QUOTA_NOTIFY = 0x1,
+};
+
+struct EventStructureBwMonitoring_t {
+	uint32_t ThresholdIndex;
+	uint64_t throughput;
+} __packed;
+
+struct EventStructureQuotaMonitoring_t {
+	/* indicates the threshold has been reached */
+	uint32_t ThresholdReached;
+	uint64_t usage;
+} __packed;
+
+union EventParamFormat_t {
+	struct EventStructureBwMonitoring_t bw_param;
+	struct EventStructureQuotaMonitoring_t quota_param;
+} __packed;
+
+/* EVT RING STRUCTURE
+ *	| Word | Bits  | Field     |
+ *	-----------------------------
+ *	|  0   | 0-7   | Protocol  |
+ *	|      | 8-15  | Reserved0 |
+ *	|      | 16-23 | Opcode    |
+ *	|      | 24-31 | Reserved1 |
+ *	|  1   | 0-31  | Word1     |
+ *	|  2   | 0-31  | Word2     |
+ *	|  3   | 0-31  | Word3     |
+ */
+struct eventElement_t {
+	uint8_t Protocol;
+	uint8_t Reserved0;
+	uint8_t Opcode;
+	uint8_t Reserved1;
+	union EventParamFormat_t Value;
+} __packed;
+
 struct IpaHwOffloadCommonChCmdData_t_v4_0 {
 	u32 protocol;
 	union IpaHwCommonChCmd CommonCh_params;
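A hedged sketch of consuming one ring element according to the layout table above, mirroring the opcode switch in ipa3_event_ring_hdlr(); decode_event() is illustrative, not part of the patch:

	static void decode_event(const struct eventElement_t *ev)
	{
		switch (ev->Opcode) {
		case BW_NOTIFY:
			pr_debug("bw: idx %u tput %llu\n",
				 ev->Value.bw_param.ThresholdIndex,
				 ev->Value.bw_param.throughput);
			break;
		case QUOTA_NOTIFY:
			pr_debug("quota: reached %u usage %llu\n",
				 ev->Value.quota_param.ThresholdReached,
				 ev->Value.quota_param.usage);
			break;
		default:
			pr_debug("unknown opcode %u\n", ev->Opcode);
		}
	}
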
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 0857ad0..d17ff30 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -3898,8 +3898,8 @@ static void ipa_cfg_qtime(void)
 
 	/* Configure timestamp resolution */
 	memset(&ts_cfg, 0, sizeof(ts_cfg));
-	ts_cfg.dpl_timestamp_lsb = 0;
-	ts_cfg.dpl_timestamp_sel = false; /* DPL: use legacy 1ms resolution */
+	ts_cfg.dpl_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
+	ts_cfg.dpl_timestamp_sel = true; /* DPL: use Qtime timestamp */
 	ts_cfg.tag_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
 	ts_cfg.nat_timestamp_lsb = IPA_NAT_TIMER_TIMESTAMP_SHFT;
 	val = ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG);
@@ -4115,7 +4115,8 @@ u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
 		[client].qmb_master_sel;
 }
 
-/* ipa3_set_client() - provide client mapping
+/**
+ * ipa3_set_client() - provide client mapping
  * @client: client type
  *
  * Return value: none
@@ -4132,8 +4133,8 @@ void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
 		ipa3_ctx->ipacm_client[index].uplink = uplink;
 	}
 }
-
-/* ipa3_get_wlan_stats() - get ipa wifi stats
+/**
+ * ipa3_get_wlan_stats() - get ipa wifi stats
  *
  * Return value: success or failure
  */
@@ -4149,6 +4150,12 @@ int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
 	return 0;
 }
 
+/**
+ * ipa3_set_wlan_quota() - set ipa wifi quota
+ * @wdi_quota: quota requirement
+ *
+ * Return value: success or failure
+ */
 int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
 {
 	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
@@ -4162,6 +4169,23 @@ int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
 }
 
 /**
+ * ipa3_inform_wlan_bw() - inform wlan bw-index
+ *
+ * Return value: success or failure
+ */
+int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_INFORM_WLAN_BW,
+			wdi_bw);
+	} else {
+		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
  * ipa3_get_client() - provide client mapping
  * @client: client type
  *
@@ -6878,6 +6902,8 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
 	api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
 	api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
+	api_ctrl->ipa_uc_bw_monitor = ipa3_uc_bw_monitor;
+	api_ctrl->ipa_set_wlan_tx_info = ipa3_set_wlan_tx_info;
 	api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
 	api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
 			ipa3_broadcast_wdi_quota_reach_ind;
@@ -7624,7 +7650,7 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
 	return res;
 }
 
-static int _ipa_suspend_pipe(enum ipa_client_type client, bool suspend)
+static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 {
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
@@ -7651,7 +7677,8 @@ static int _ipa_suspend_pipe(enum ipa_client_type client, bool suspend)
 	 * This needs to happen before starting the channel to make
 	 * sure we don't loose any interrupt
 	 */
-	if (!suspend && !atomic_read(&ep->sys->curr_polling_state))
+	if (!suspend && !atomic_read(&ep->sys->curr_polling_state) &&
+		!IPA_CLIENT_IS_APPS_PROD(client))
 		gsi_config_channel_mode(ep->gsi_chan_hdl,
 					GSI_CHAN_MODE_CALLBACK);
 
@@ -7669,6 +7696,10 @@ static int _ipa_suspend_pipe(enum ipa_client_type client, bool suspend)
 		}
 	}
 
+	/* Apps prod pipes use a common event ring; cannot configure mode */
+	if (IPA_CLIENT_IS_APPS_PROD(client))
+		return 0;
+
 	if (suspend) {
 		IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
 		gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
@@ -7683,19 +7714,104 @@ static int _ipa_suspend_pipe(enum ipa_client_type client, bool suspend)
 	return 0;
 }
 
+void ipa3_force_close_coal(void)
+{
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
+	struct ipahal_reg_valmask valmask;
+	struct ipa3_desc desc;
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
+		return;
+
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+	ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&reg_write_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct register_write imm cmd\n");
+		ipa_assert();
+		return;
+	}
+	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+
+	IPADBG("Sending 1 descriptor for coal force close\n");
+	if (ipa3_send_cmd_timeout(1, &desc,
+		IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
+		IPAERR("ipa3_send_cmd failed\n");
+		ipa_assert();
+	}
+	ipahal_destroy_imm_cmd(cmd_pyld);
+}
+
 int ipa3_suspend_apps_pipes(bool suspend)
 {
 	int res;
+	enum ipa_client_type client;
 
-	res = _ipa_suspend_pipe(IPA_CLIENT_APPS_LAN_CONS, suspend);
-	if (res)
-		return res;
+	if (suspend)
+		ipa3_force_close_coal();
 
-	res = _ipa_suspend_pipe(IPA_CLIENT_APPS_WAN_CONS, suspend);
-	if (res)
-		return res;
+	for (client = 0; client < IPA_CLIENT_MAX; client++) {
+		if (IPA_CLIENT_IS_APPS_CONS(client)) {
+			res = _ipa_suspend_resume_pipe(client, suspend);
+			if (res)
+				goto undo_cons;
+		}
+	}
+
+	if (suspend) {
+		struct ipahal_reg_tx_wrapper tx;
+		int ep_idx;
+
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		if (ep_idx == IPA_EP_NOT_ALLOCATED ||
+				(!ipa3_ctx->ep[ep_idx].valid))
+			goto do_prod;
+
+		ipahal_read_reg_fields(IPA_STATE_TX_WRAPPER, &tx);
+		if (tx.coal_slave_open_frame != 0) {
+			IPADBG("COAL frame is open 0x%x\n",
+				tx.coal_slave_open_frame);
+			res = -EAGAIN;
+			goto undo_cons;
+		}
+
+		usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+		res = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n,
+			ipa3_ctx->ee);
+		if (res) {
+			IPADBG("suspend irq is pending 0x%x\n", res);
+			goto undo_cons;
+		}
+	}
+do_prod:
+	for (client = 0; client < IPA_CLIENT_MAX; client++) {
+		if (IPA_CLIENT_IS_APPS_PROD(client)) {
+			res = _ipa_suspend_resume_pipe(client, suspend);
+			if (res)
+				goto undo_prod;
+		}
+	}
 
 	return 0;
+undo_prod:
+	for (; client <= IPA_CLIENT_MAX && client >= 0; client--)
+		if (IPA_CLIENT_IS_APPS_PROD(client))
+			_ipa_suspend_resume_pipe(client, !suspend);
+	client = IPA_CLIENT_MAX;
+undo_cons:
+	for (; client <= IPA_CLIENT_MAX && client >= 0; client--)
+		if (IPA_CLIENT_IS_APPS_CONS(client))
+			_ipa_suspend_resume_pipe(client, !suspend);
+	return res;
 }
 
 int ipa3_allocate_dma_task_for_gsi(void)
@@ -8282,6 +8398,30 @@ bool ipa3_is_apq(void)
 }
 
 /**
+ * ipa_get_fnr_info() - get fnr_info
+ *
+ * Return value: true if set, false if not set
+ *
+ */
+bool ipa_get_fnr_info(struct ipacm_fnr_info *fnr_info)
+{
+	bool res = false;
+
+	if (ipa3_ctx->fnr_info.valid) {
+		fnr_info->valid = ipa3_ctx->fnr_info.valid;
+		fnr_info->hw_counter_offset =
+			ipa3_ctx->fnr_info.hw_counter_offset;
+		fnr_info->sw_counter_offset =
+			ipa3_ctx->fnr_info.sw_counter_offset;
+		res = true;
+	} else {
+		IPAERR("fnr_info not valid!\n");
+		res = false;
+	}
+	return res;
+}
+
+/**
  * ipa3_disable_prefetch() - disable\enable tx prefetch
  *
  * @client: the client which is related to the TX where prefetch will be
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index f213178..91cef82 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -725,6 +725,20 @@ int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
 
+	/* start uC event ring */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		if (ipa3_ctx->uc_ctx.uc_loaded &&
+			!ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+			if (ipa3_uc_setup_event_ring()) {
+				IPAERR("failed to set up uc event ring\n");
+				return -EFAULT;
+			}
+		} else {
+			IPAERR("uc-loaded %d, ring-valid %d\n",
+				ipa3_ctx->uc_ctx.uc_loaded,
+				ipa3_ctx->uc_ctx.uc_event_ring_valid);
+		}
+	}
+
 	/* enable data path */
 	result = ipa3_enable_data_path(ipa_ep_idx_rx);
 	if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 6bf24c86..440788f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1180,6 +1180,112 @@ static void ipareg_parse_comp_cfg_v4_5(
 		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5);
 }
 
+static void ipareg_parse_state_tx_wrapper_v4_5(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_tx_wrapper *tx =
+		(struct ipahal_reg_tx_wrapper *)fields;
+
+	tx->tx0_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK);
+
+	tx->tx1_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK);
+
+	tx->ipa_prod_ackmngr_db_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK);
+
+	tx->ipa_prod_ackmngr_state_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK);
+
+	tx->ipa_prod_prod_bresp_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK);
+
+	tx->ipa_prod_prod_bresp_toggle_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK);
+
+	tx->ipa_mbim_pkt_fms_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK);
+
+	tx->mbim_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK);
+
+	tx->trnseq_force_valid = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT,
+		IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK);
+
+	tx->pkt_drop_cnt_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK);
+
+	tx->nlo_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK);
+
+	tx->coal_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK);
+
+	tx->coal_slave_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK);
+
+	tx->coal_slave_ctx_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK);
+
+	tx->coal_slave_open_frame = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK);
+}
+
+static void ipareg_parse_state_tx_wrapper_v4_7(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_tx_wrapper *tx =
+		(struct ipahal_reg_tx_wrapper *)fields;
+
+	tx->tx0_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK_v4_7);
+
+	tx->tx1_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK_v4_7);
+
+	tx->ipa_prod_ackmngr_db_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK_v4_7);
+
+	tx->ipa_prod_ackmngr_state_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK_v4_7);
+
+	tx->ipa_prod_prod_bresp_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK_v4_7);
+
+	tx->coal_slave_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK_v4_7);
+
+	tx->coal_slave_ctx_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK_v4_7);
+
+	tx->coal_slave_open_frame = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK_v4_7);
+}
+
 static void ipareg_construct_qcncm(
 	enum ipahal_reg_name reg, const void *fields, u32 *val)
 {
@@ -2968,6 +3074,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 	[IPA_HW_v4_5][IPA_COMP_CFG] = {
 		ipareg_construct_comp_cfg_v4_5, ipareg_parse_comp_cfg_v4_5,
 		0x0000003C, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STATE_TX_WRAPPER] = {
+		ipareg_construct_dummy, ipareg_parse_state_tx_wrapper_v4_5,
+		0x00000090, 0, 0, 0, 1 },
 	[IPA_HW_v4_5][IPA_STATE_FETCHER_MASK] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
 		-1, 0, 0, 0, 0},
@@ -3167,6 +3276,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 	[IPA_HW_v4_5][IPA_COAL_QMAP_CFG] = {
 		ipareg_construct_coal_qmap_cfg, ipareg_parse_coal_qmap_cfg,
 		0x00001810, 0, 0, 0, 0},
+	[IPA_HW_v4_7][IPA_STATE_TX_WRAPPER] = {
+		ipareg_construct_dummy, ipareg_parse_state_tx_wrapper_v4_7,
+		0x00000090, 0, 0, 0, 1 },
 };
 
 /*
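The parse helpers above lean on IPA_GETFIELD_FROM_REG, which is defined elsewhere in ipahal; the usual mask-then-shift form is sketched here as an approximation, shown against the TX0_IDLE field:

	/* approximate shape of the ipahal helper */
	#define GETFIELD_FROM_REG(val, shft, bmsk) \
		(((val) & (bmsk)) >> (shft))

	/* e.g. extracting bit 0 of IPA_STATE_TX_WRAPPER: */
	static bool tx0_idle_from_reg(u32 val)
	{
		return GETFIELD_FROM_REG(val,
			IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT,
			IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK);
	}
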
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 9fa862c..5191d3b1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -380,6 +380,27 @@ struct ipahal_reg_comp_cfg {
 };
 
 /*
+ * struct ipahal_reg_tx_wrapper- IPA TX Wrapper state information
+ */
+struct ipahal_reg_tx_wrapper {
+	bool tx0_idle;
+	bool tx1_idle;
+	bool ipa_prod_ackmngr_db_empty;
+	bool ipa_prod_ackmngr_state_idle;
+	bool ipa_prod_prod_bresp_empty;
+	bool ipa_prod_prod_bresp_toggle_idle;
+	bool ipa_mbim_pkt_fms_idle;
+	u8 mbim_direct_dma;
+	bool trnseq_force_valid;
+	bool pkt_drop_cnt_idle;
+	u8 nlo_direct_dma;
+	u8 coal_direct_dma;
+	bool coal_slave_idle;
+	bool coal_slave_ctx_idle;
+	u8 coal_slave_open_frame;
+};
+
+/*
  * struct ipa_hash_tuple - Hash tuple members for flt and rt
  *  the fields tells if to be masked or not
  * @src_id: pipe number for flt, table index for rt
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 119ad8c..d934b91 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -626,5 +626,52 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
 #define IPA_COAL_QMAP_CFG_BMSK 0x1
 #define IPA_COAL_QMAP_CFG_SHFT 0
 
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK 0xf0000000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT 0x1f
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK 0x100000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT 0x10
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK 0x8000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT 0xf
+#define IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK 0x6000
+#define IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT 0xd
+#define IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK 0x1800
+#define IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT 0xb
+#define IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK 0x400
+#define IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT 0xa
+#define IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK 0x200
+#define IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT 0x9
+#define IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK 0x180
+#define IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT 0x7
+#define IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK 0x40
+#define IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT 0x6
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK 0x20
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT 0x5
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK 0x10
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0x3
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT 0x0
+
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK_v4_7 0xf0000000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT_v4_7 28
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK_v4_7 0x80000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT_v4_7 19
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK_v4_7 0x40000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT_v4_7 18
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK_v4_7 0x10
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT_v4_7 4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK_v4_7 0x8
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT_v4_7 3
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK_v4_7 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT_v4_7 2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK_v4_7 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT_v4_7 1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK_v4_7 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT_v4_7 0
 
 #endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 3390354..11c520f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -3104,10 +3104,15 @@ static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
 	IPAWANERR("iface name %s, quota %lu\n",
 		  data->interface_name, (unsigned long) data->quota_mbytes);
 
-	rc = ipa3_set_wlan_quota(&wifi_quota);
-	/* check if wlan-fw takes this quota-set */
-	if (!wifi_quota.set_valid)
-		rc = -EFAULT;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		IPADBG("use ipa-uc for quota\n");
+		rc = ipa3_uc_quota_monitor(data->set_quota);
+	} else {
+		rc = ipa3_set_wlan_quota(&wifi_quota);
+		/* check if wlan-fw takes this quota-set */
+		if (!wifi_quota.set_valid)
+			rc = -EFAULT;
+	}
 	return rc;
 }
 
@@ -3589,36 +3594,42 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 	data->ipv6_tx_bytes +=
 		con_stats->client[index].num_ipv6_bytes;
 
-	/* query WIGIG UL stats */
-	memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
-	rc = ipa_query_teth_stats(IPA_CLIENT_WIGIG_PROD, con_stats, reset);
-	if (rc) {
-		IPAERR("IPA_CLIENT_WIGIG_PROD query failed %d\n", rc);
-		kfree(con_stats);
-		return rc;
+	if (ipa3_get_ep_mapping(IPA_CLIENT_WIGIG_PROD) !=
+			IPA_EP_NOT_ALLOCATED) {
+		/* query WIGIG UL stats */
+		memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
+		rc = ipa_query_teth_stats(IPA_CLIENT_WIGIG_PROD, con_stats,
+									reset);
+		if (rc) {
+			IPAERR("IPA_CLIENT_WIGIG_PROD query failed %d\n", rc);
+			kfree(con_stats);
+			return rc;
+		}
+
+		if (rmnet_ipa3_ctx->ipa_config_is_apq)
+			index = IPA_CLIENT_MHI_PRIME_TETH_CONS;
+		else
+			index = IPA_CLIENT_Q6_WAN_CONS;
+
+		IPAWANDBG("wigig: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
+				con_stats->client[index].num_ipv4_pkts,
+				con_stats->client[index].num_ipv4_bytes,
+				con_stats->client[index].num_ipv6_pkts,
+				con_stats->client[index].num_ipv6_bytes);
+
+		/* update the WIGIG UL stats */
+		data->ipv4_tx_packets +=
+			con_stats->client[index].num_ipv4_pkts;
+		data->ipv6_tx_packets +=
+			con_stats->client[index].num_ipv6_pkts;
+		data->ipv4_tx_bytes +=
+			con_stats->client[index].num_ipv4_bytes;
+		data->ipv6_tx_bytes +=
+			con_stats->client[index].num_ipv6_bytes;
+	} else {
+		IPAWANDBG("IPA_CLIENT_WIGIG_PROD client not supported\n");
 	}
 
-	if (rmnet_ipa3_ctx->ipa_config_is_apq)
-		index = IPA_CLIENT_MHI_PRIME_TETH_CONS;
-	else
-		index = IPA_CLIENT_Q6_WAN_CONS;
-
-	IPAWANDBG("wigig: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
-		con_stats->client[index].num_ipv4_pkts,
-		con_stats->client[index].num_ipv4_bytes,
-		con_stats->client[index].num_ipv6_pkts,
-		con_stats->client[index].num_ipv6_bytes);
-
-	/* update the WIGIG UL stats */
-	data->ipv4_tx_packets +=
-		con_stats->client[index].num_ipv4_pkts;
-	data->ipv6_tx_packets +=
-		con_stats->client[index].num_ipv6_pkts;
-	data->ipv4_tx_bytes +=
-		con_stats->client[index].num_ipv4_bytes;
-	data->ipv6_tx_bytes +=
-		con_stats->client[index].num_ipv6_bytes;
-
 	IPAWANDBG("v4_tx_p(%lu) v6_tx_p(%lu) v4_tx_b(%lu) v6_tx_b(%lu)\n",
 		(unsigned long) data->ipv4_tx_packets,
 		(unsigned long) data->ipv6_tx_packets,
@@ -3628,6 +3639,144 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 	return rc;
 }
 
+static int rmnet_ipa3_query_tethering_stats_fnr(
+	struct wan_ioctl_query_tether_stats_all *data)
+{
+	int rc = 0;
+	int num_counters;
+	struct ipa_ioc_flt_rt_query fnr_stats, fnr_stats_sw;
+	struct ipacm_fnr_info fnr_info;
+
+	memset(&fnr_stats, 0, sizeof(struct ipa_ioc_flt_rt_query));
+	memset(&fnr_stats_sw, 0, sizeof(struct ipa_ioc_flt_rt_query));
+	memset(&fnr_info, 0, sizeof(struct ipacm_fnr_info));
+
+	if (!ipa_get_fnr_info(&fnr_info)) {
+		IPAERR("FNR counter haven't configured\n");
+		return -EINVAL;
+	}
+
+	fnr_stats.start_id = fnr_info.hw_counter_offset + UL_HW;
+	fnr_stats.end_id = fnr_info.hw_counter_offset + DL_HW;
+	fnr_stats.reset = data->reset_stats;
+	num_counters = fnr_stats.end_id - fnr_stats.start_id + 1;
+
+	if (num_counters != 2) {
+		IPAWANERR("Only query 2 counters\n");
+		return -EINVAL;
+	}
+
+	fnr_stats.stats = (uint64_t)kcalloc(
+		num_counters,
+		sizeof(struct ipa_flt_rt_stats),
+		GFP_KERNEL);
+	if (!fnr_stats.stats) {
+		IPAERR("Failed to allocate memory for query hw-stats\n");
+		return -ENOMEM;
+	}
+
+	if (ipa_get_flt_rt_stats(&fnr_stats)) {
+		IPAERR("Failed to request stats from h/w\n");
+		rc = -EINVAL;
+		goto free_stats;
+	}
+
+	IPAWANDBG("ul: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts_hash);
+	IPAWANDBG("dl: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts_hash);
+
+	data->tx_bytes =
+		((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes;
+	data->rx_bytes =
+		((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes;
+
+	/* get the sw stats */
+	fnr_stats_sw.start_id = fnr_info.sw_counter_offset + UL_HW_CACHE;
+	fnr_stats_sw.end_id = fnr_info.sw_counter_offset + DL_HW_CACHE;
+	fnr_stats_sw.reset = data->reset_stats;
+	num_counters = fnr_stats_sw.end_id - fnr_stats_sw.start_id + 1;
+
+	if (num_counters != 2) {
+		IPAWANERR("only 2 counters can be queried\n");
+		rc = -EINVAL;
+		goto free_stats;
+	}
+
+	fnr_stats_sw.stats = (uint64_t)kcalloc(
+		num_counters,
+		sizeof(struct ipa_flt_rt_stats),
+		GFP_KERNEL);
+	if (!fnr_stats_sw.stats) {
+		IPAERR("Failed to allocate memory for query sw-stats\n");
+		rc = -ENOMEM;
+		goto free_stats;
+	}
+
+	if (ipa_get_flt_rt_stats(&fnr_stats_sw)) {
+		IPAERR("Failed to request stats from h/w\n");
+		rc = -EINVAL;
+		goto free_stats2;
+	}
+
+	IPAWANDBG("ul sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash);
+	IPAWANDBG("dl sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash);
+
+	/* update the sw-cache */
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts_hash;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts_hash;
+
+	IPAWANDBG("ul sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash);
+	IPAWANDBG("dl sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash);
+	/* write to the sw cache */
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		UL_HW_CACHE,
+		((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0])) {
+		IPAERR("Failed to set stats to sw-cache %d\n",
+			fnr_info.sw_counter_offset + UL_HW_CACHE);
+		rc = -EINVAL;
+		goto free_stats2;
+	}
+
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		DL_HW_CACHE,
+		((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1])) {
+		IPAERR("Failed to set stats to sw-cache %d\n",
+			fnr_info.sw_counter_offset + DL_HW_CACHE);
+		rc = -EINVAL;
+		goto free_stats2;
+	}
+
+free_stats2:
+	kfree((void *)fnr_stats_sw.stats);
+free_stats:
+	kfree((void *)fnr_stats.stats);
+	return rc;
+}
 
 int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
 	bool reset)
@@ -3685,17 +3834,30 @@ int rmnet_ipa3_query_tethering_stats_all(
 			data->upstreamIface);
 	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
 		IPAWANDBG_LOW(" query wifi-backhaul stats\n");
-		rc = rmnet_ipa3_query_tethering_stats_wifi(
-			&tether_stats, data->reset_stats);
-		if (rc) {
-			IPAWANERR_RL(
-				"wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
-			return rc;
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5 ||
+			!ipa3_ctx->hw_stats.enabled) {
+			IPAWANDBG("hw version %d,hw_stats.enabled %d\n",
+				ipa3_ctx->ipa_hw_type,
+				ipa3_ctx->hw_stats.enabled);
+			rc = rmnet_ipa3_query_tethering_stats_wifi(
+				&tether_stats, data->reset_stats);
+			if (rc) {
+				IPAWANERR_RL(
+					"wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+				return rc;
+			}
+			data->tx_bytes = tether_stats.ipv4_tx_bytes
+				+ tether_stats.ipv6_tx_bytes;
+			data->rx_bytes = tether_stats.ipv4_rx_bytes
+				+ tether_stats.ipv6_rx_bytes;
+		} else {
+			rc = rmnet_ipa3_query_tethering_stats_fnr(data);
+			if (rc) {
+				IPAWANERR_RL(
+					"wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+				return rc;
+			}
 		}
-		data->tx_bytes = tether_stats.ipv4_tx_bytes
-			+ tether_stats.ipv6_tx_bytes;
-		data->rx_bytes = tether_stats.ipv4_rx_bytes
-			+ tether_stats.ipv6_rx_bytes;
 	} else {
 		IPAWANDBG_LOW(" query modem-backhaul stats\n");
 		tether_stats.ipa_client = data->ipa_client;
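The sw-cache update in the rmnet_ipa3_query_tethering_stats_fnr() hunk above repeats the ((struct ipa_flt_rt_stats *)...)[] cast for every field. A minimal sketch of a helper that folds one snapshot entry into another (hypothetical, not part of this patch; field names are the ones used in the accesses above):

	static void fnr_stats_add(struct ipa_flt_rt_stats *dst,
				  const struct ipa_flt_rt_stats *src)
	{
		dst->num_bytes += src->num_bytes;
		dst->num_pkts += src->num_pkts;
		dst->num_pkts_hash += src->num_pkts_hash;
	}

	/* with typed locals sw = fnr_stats_sw.stats, hw = fnr_stats.stats: */
	fnr_stats_add(&sw[0], &hw[0]);	/* uplink */
	fnr_stats_add(&sw[1], &hw[1]);	/* downlink */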
diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
index c1e450d..caae2ba 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
@@ -728,6 +728,191 @@ static int ipa_test_hw_stats_set_sw_stats(void *priv)
 	return ret;
 }
 
+static int ipa_test_hw_stats_set_uc_event_ring(void *priv)
+{
+	struct ipa_ioc_flt_rt_counter_alloc *counter = NULL;
+	int ret = 0;
+
+	/* set uc event ring */
+	IPA_UT_INFO("========set uc event ring ========\n");
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		if (ipa3_ctx->uc_ctx.uc_loaded &&
+			!ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+			if (ipa3_uc_setup_event_ring()) {
+				IPA_UT_ERR("failed to set uc_event ring\n");
+				ret = -EFAULT;
+			}
+		} else
+			IPA_UT_ERR("uc-loaded %d, ring-valid %d",
+				ipa3_ctx->uc_ctx.uc_loaded,
+				ipa3_ctx->uc_ctx.uc_event_ring_valid);
+	}
+	IPA_UT_INFO("================ done ============\n");
+
+	/* allocate counters */
+	IPA_UT_INFO("========set hw counter ========\n");
+
+	counter = kzalloc(sizeof(struct ipa_ioc_flt_rt_counter_alloc),
+					  GFP_KERNEL);
+	if (!counter)
+		return -ENOMEM;
+
+	counter->hw_counter.num_counters = 4;
+	counter->sw_counter.num_counters = 4;
+
+	/* allocate counters */
+	ret = ipa3_alloc_counter_id(counter);
+	if (ret < 0) {
+		IPA_UT_ERR("ipa3_alloc_counter_id fails\n");
+		kfree(counter);
+		return -ENOMEM;
+	}
+	ipa3_ctx->fnr_info.hw_counter_offset = counter->hw_counter.start_id;
+	ipa3_ctx->fnr_info.sw_counter_offset = counter->sw_counter.start_id;
+	IPA_UT_INFO("hw-offset %d, sw-offset %d\n",
+		ipa3_ctx->fnr_info.hw_counter_offset,
+		ipa3_ctx->fnr_info.sw_counter_offset);
+
+	kfree(counter);
+	return ret;
+}
+
+static int ipa_test_hw_stats_set_quota(void *priv)
+{
+	int ret;
+	uint64_t quota = 500;
+
+	IPA_UT_INFO("========set quota ========\n");
+	ret = ipa3_uc_quota_monitor(quota);
+	if (ret < 0) {
+		IPA_UT_ERR("ipa3_uc_quota_monitor fails\n");
+		ret = -ENOMEM;
+	}
+	IPA_UT_INFO("================ done ============\n");
+	return ret;
+}
+
+static int ipa_test_hw_stats_set_bw(void *priv)
+{
+	int ret;
+	struct ipa_wdi_bw_info *info = NULL;
+
+	IPA_UT_INFO("========set BW voting ========\n");
+	info = kzalloc(sizeof(struct ipa_wdi_bw_info),
+					  GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->num = 3;
+	info->threshold[0] = 200;
+	info->threshold[1] = 400;
+	info->threshold[2] = 600;
+
+	ret = ipa3_uc_bw_monitor(info);
+	if (ret < 0) {
+		IPA_UT_ERR("ipa3_uc_bw_monitor fails\n");
+		ret = -ENOMEM;
+	}
+
+	IPA_UT_INFO("================ done ============\n");
+
+	kfree(info);
+	return ret;
+}
+
+static int ipa_test_hw_stats_hit_quota(void *priv)
+{
+	int ret = 0, counter_index, i;
+	struct ipa_flt_rt_stats stats;
+
+	/* set sw counters */
+	IPA_UT_INFO("========set quota hit========\n");
+	counter_index = ipa3_ctx->fnr_info.sw_counter_offset + UL_WLAN_TX;
+
+	for (i = 0; i < 5; i++) {
+		IPA_UT_INFO("========set 100 ========\n");
+		memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+		stats.num_bytes = 100;
+		stats.num_pkts = 69;
+		IPA_UT_INFO(
+			"set counter %u pkt_cnt %u bytes cnt %llu\n",
+			counter_index, stats.num_pkts, stats.num_bytes);
+		ret = ipa_set_flt_rt_stats(counter_index, stats);
+		if (ret < 0) {
+			IPA_UT_ERR("ipa_set_flt_rt_stats fails\n");
+			ret = -ENOMEM;
+		}
+		msleep(1000);
+		IPA_UT_INFO("========set 200 ========\n");
+		memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+		stats.num_bytes = 200;
+		stats.num_pkts = 69;
+		IPA_UT_INFO(
+			"set counter %u pkt_cnt %u bytes cnt %llu\n",
+			counter_index, stats.num_pkts, stats.num_bytes);
+		ret = ipa_set_flt_rt_stats(counter_index, stats);
+		if (ret < 0) {
+			IPA_UT_ERR("ipa_set_flt_rt_stats fails\n");
+			ret = -ENOMEM;
+		}
+		msleep(1000);
+		IPA_UT_INFO("========set 300 ========\n");
+		memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+		stats.num_bytes = 300;
+		stats.num_pkts = 69;
+		IPA_UT_INFO(
+			"set counter %u pkt_cnt %u bytes cnt %llu\n",
+			counter_index, stats.num_pkts, stats.num_bytes);
+		ret = ipa_set_flt_rt_stats(counter_index, stats);
+		if (ret < 0) {
+			IPA_UT_ERR("ipa_set_flt_rt_stats fails\n");
+			ret = -ENOMEM;
+		}
+		msleep(1000);
+		IPA_UT_INFO("========set 500 ========\n");
+		memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+		stats.num_bytes = 500;
+		stats.num_pkts = 69;
+		IPA_UT_INFO(
+			"set counter %u pkt_cnt %u bytes cnt %llu\n",
+			counter_index, stats.num_pkts, stats.num_bytes);
+		ret = ipa_set_flt_rt_stats(counter_index, stats);
+		if (ret < 0) {
+			IPA_UT_ERR("ipa_set_flt_rt_stats fails\n");
+			ret = -ENOMEM;
+		}
+		msleep(1000);
+		IPA_UT_INFO("========set 600 ========\n");
+		memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+		stats.num_bytes = 600;
+		stats.num_pkts = 69;
+		IPA_UT_INFO(
+			"set counter %u pkt_cnt %u bytes cnt %llu\n",
+			counter_index, stats.num_pkts, stats.num_bytes);
+		ret = ipa_set_flt_rt_stats(counter_index, stats);
+		if (ret < 0) {
+			IPA_UT_ERR("ipa_set_flt_rt_stats fails\n");
+			ret = -ENOMEM;
+		}
+		msleep(1000);
+		IPA_UT_INFO("========set 1000 ========\n");
+		memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+		stats.num_bytes = 1000;
+		stats.num_pkts = 69;
+		IPA_UT_INFO(
+			"set counter %u pkt_cnt %u bytes cnt %llu\n",
+			counter_index, stats.num_pkts, stats.num_bytes);
+		ret = ipa_set_flt_rt_stats(counter_index, stats);
+		if (ret < 0) {
+			IPA_UT_ERR("ipa_set_flt_rt_stats fails\n");
+			ret = -ENOMEM;
+		}
+	}
+	IPA_UT_INFO("================ done ============\n");
+	return ret;
+}
+
 /* Suite definition block */
 IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
 	ipa_test_hw_stats_suite_setup, ipa_test_hw_stats_suite_teardown)
@@ -758,4 +943,20 @@ IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
 		ipa_test_hw_stats_set_sw_stats, false,
 		IPA_HW_v4_5, IPA_HW_MAX),
 
+	IPA_UT_ADD_TEST(set_uc_evtring, "Set uc event ring",
+		ipa_test_hw_stats_set_uc_event_ring, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(set_quota, "Set quota",
+		ipa_test_hw_stats_set_quota, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(set_bw_voting, "Set bw_voting",
+		ipa_test_hw_stats_set_bw, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(hit_quota, "quota hits",
+		ipa_test_hw_stats_hit_quota, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
 } IPA_UT_DEFINE_SUITE_END(hw_stats);
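The hit_quota test above repeats one set-and-sleep block six times, differing only in num_bytes. A table-driven sketch of the same sequence (helper name hypothetical; it uses only the ipa_set_flt_rt_stats(), msleep() and IPA_UT_* calls already present in the test, and unlike the test it stops at the first failure instead of pressing on):

	static const u64 quota_steps[] = { 100, 200, 300, 500, 600, 1000 };

	static int ipa_test_set_quota_steps(u32 counter_index)
	{
		struct ipa_flt_rt_stats stats;
		int i, ret;

		for (i = 0; i < ARRAY_SIZE(quota_steps); i++) {
			memset(&stats, 0, sizeof(stats));
			stats.num_bytes = quota_steps[i];
			stats.num_pkts = 69;
			IPA_UT_INFO("set counter %u pkt_cnt %u bytes cnt %llu\n",
				    counter_index, stats.num_pkts,
				    stats.num_bytes);
			ret = ipa_set_flt_rt_stats(counter_index, stats);
			if (ret < 0)
				return ret;
			if (i + 1 < ARRAY_SIZE(quota_steps))
				msleep(1000);
		}
		return 0;
	}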
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 528b461..b485eb9 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -126,6 +126,7 @@ struct msm11ad_ctx {
 	struct cpumask boost_cpu_1;
 
 	bool keep_radio_on_during_sleep;
+	bool use_ap_ps;
 	int features;
 };
 
@@ -1054,6 +1055,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
 	 *	qcom,msm-bus,vectors-KBps =
 	 *		<100 512 0 0>,
 	 *		<100 512 600000 800000>;
+	 *	qcom,use-ap-power-save; (ctx->use_ap_ps)
 	 *};
 	 * rc_node stands for "qcom,pcie", selected entries:
 	 * cell-index = <1>; (ctx->rc_index)
@@ -1094,6 +1096,8 @@ static int msm_11ad_probe(struct platform_device *pdev)
 		rc = -EINVAL;
 		goto out_module;
 	}
+	ctx->use_ap_ps = of_property_read_bool(of_node,
+					       "qcom,use-ap-power-save");
 
 	/*== execute ==*/
 	/* turn device on */
@@ -1558,6 +1562,12 @@ static int ops_get_capa(void *handle)
 	if (!ctx->smmu_s1_bypass)
 		capa |= BIT(WIL_PLATFORM_CAPA_SMMU);
 
+	pr_debug("%s: use AP power save is %s\n", __func__, ctx->use_ap_ps ?
+		 "allowed" : "not allowed");
+
+	if (ctx->use_ap_ps)
+		capa |= BIT(WIL_PLATFORM_CAPA_AP_PS);
+
 	return capa;
 }
 
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index 1fbd8fc..33ff3d8 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -108,8 +108,6 @@ static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
 	list_for_each(pos, &ext_disp->display_list)
 		count++;
 
-	data->codec.stream_id = count;
-
 	list_add(&node->list, &ext_disp->display_list);
 
 
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 12cc4f6..a0ae712 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -103,6 +103,7 @@ enum fg_debug_flag {
 	FG_BUS_READ		= BIT(6), /* Show REGMAP reads */
 	FG_CAP_LEARN		= BIT(7), /* Show capacity learning */
 	FG_TTF			= BIT(8), /* Show time to full */
+	FG_FVSS			= BIT(9), /* Show FVSS */
 };
 
 /* SRAM access */
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index 928fb6e..69fff08 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -57,6 +57,8 @@ static struct tables table[] = {
 
 static struct qg_battery_data *the_battery;
 
+static void qg_battery_profile_free(void);
+
 static int qg_battery_data_open(struct inode *inode, struct file *file)
 {
 	struct qg_battery_data *battery = container_of(inode->i_cdev,
@@ -427,42 +429,56 @@ int qg_batterydata_init(struct device_node *profile_node)
 	int rc = 0;
 	struct qg_battery_data *battery;
 
-	battery = kzalloc(sizeof(*battery), GFP_KERNEL);
-	if (!battery)
-		return -ENOMEM;
+	/*
+	 * If a battery profile is already initialized, free the existing
+	 * profile tables, then allocate and parse the new profile. This is
+	 * required for multi-profile load support.
+	 */
+	if (the_battery) {
+		battery = the_battery;
+		battery->profile_node = NULL;
+		qg_battery_profile_free();
+	} else {
+		battery = kzalloc(sizeof(*battery), GFP_KERNEL);
+		if (!battery)
+			return -ENOMEM;
+		/* char device to access battery-profile data */
+		rc = alloc_chrdev_region(&battery->dev_no, 0, 1,
+							"qg_battery");
+		if (rc < 0) {
+			pr_err("Failed to allocate chrdev rc=%d\n", rc);
+			goto free_battery;
+		}
+
+		cdev_init(&battery->battery_cdev, &qg_battery_data_fops);
+		rc = cdev_add(&battery->battery_cdev,
+						battery->dev_no, 1);
+		if (rc) {
+			pr_err("Failed to add battery_cdev rc=%d\n", rc);
+			goto unregister_chrdev;
+		}
+
+		battery->battery_class = class_create(THIS_MODULE,
+							"qg_battery");
+		if (IS_ERR_OR_NULL(battery->battery_class)) {
+			pr_err("Failed to create qg-battery class\n");
+			rc = -ENODEV;
+			goto delete_cdev;
+		}
+
+		battery->battery_device = device_create(
+						battery->battery_class,
+						NULL, battery->dev_no,
+						NULL, "qg_battery");
+		if (IS_ERR_OR_NULL(battery->battery_device)) {
+			pr_err("Failed to create battery_device device\n");
+			rc = -ENODEV;
+			goto delete_cdev;
+		}
+		the_battery = battery;
+	}
 
 	battery->profile_node = profile_node;
-
-	/* char device to access battery-profile data */
-	rc = alloc_chrdev_region(&battery->dev_no, 0, 1, "qg_battery");
-	if (rc < 0) {
-		pr_err("Failed to allocate chrdev rc=%d\n", rc);
-		goto free_battery;
-	}
-
-	cdev_init(&battery->battery_cdev, &qg_battery_data_fops);
-	rc = cdev_add(&battery->battery_cdev, battery->dev_no, 1);
-	if (rc) {
-		pr_err("Failed to add battery_cdev rc=%d\n", rc);
-		goto unregister_chrdev;
-	}
-
-	battery->battery_class = class_create(THIS_MODULE, "qg_battery");
-	if (IS_ERR_OR_NULL(battery->battery_class)) {
-		pr_err("Failed to create qg-battery class\n");
-		rc = -ENODEV;
-		goto delete_cdev;
-	}
-
-	battery->battery_device = device_create(battery->battery_class,
-					NULL, battery->dev_no,
-					NULL, "qg_battery");
-	if (IS_ERR_OR_NULL(battery->battery_device)) {
-		pr_err("Failed to create battery_device device\n");
-		rc = -ENODEV;
-		goto delete_cdev;
-	}
-
 	/* parse the battery profile */
 	rc = qg_parse_battery_profile(battery);
 	if (rc < 0) {
@@ -470,9 +486,7 @@ int qg_batterydata_init(struct device_node *profile_node)
 		goto destroy_device;
 	}
 
-	the_battery = battery;
-
-	pr_info("QG Battery-profile loaded, '/dev/qg_battery' created!\n");
+	pr_info("QG Battery-profile loaded\n");
 
 	return 0;
 
@@ -487,27 +501,31 @@ int qg_batterydata_init(struct device_node *profile_node)
 	return rc;
 }
 
-void qg_batterydata_exit(void)
+static void qg_battery_profile_free(void)
 {
 	int i, j;
 
+	/* delete all the battery profile memory */
+	for (i = 0; i < TABLE_MAX; i++) {
+		kfree(the_battery->profile[i].name);
+		kfree(the_battery->profile[i].row_entries);
+		kfree(the_battery->profile[i].col_entries);
+		for (j = 0; j < the_battery->profile[i].rows; j++) {
+			if (the_battery->profile[i].data)
+				kfree(the_battery->profile[i].data[j]);
+		}
+		kfree(the_battery->profile[i].data);
+	}
+}
+
+void qg_batterydata_exit(void)
+{
 	if (the_battery) {
 		/* unregister the device node */
 		device_destroy(the_battery->battery_class, the_battery->dev_no);
 		cdev_del(&the_battery->battery_cdev);
 		unregister_chrdev_region(the_battery->dev_no, 1);
-
-		/* delete all the battery profile memory */
-		for (i = 0; i < TABLE_MAX; i++) {
-			kfree(the_battery->profile[i].name);
-			kfree(the_battery->profile[i].row_entries);
-			kfree(the_battery->profile[i].col_entries);
-			for (j = 0; j < the_battery->profile[i].rows; j++) {
-				if (the_battery->profile[i].data)
-					kfree(the_battery->profile[i].data[j]);
-			}
-			kfree(the_battery->profile[i].data);
-		}
+		qg_battery_profile_free();
 	}
 
 	kfree(the_battery);
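Net effect of the qg_batterydata_init() rework above: only the first call creates the qg_battery character device; repeat calls swap just the parsed profile tables. A sketch of the resulting call pattern (profile node names hypothetical):

	/* first call: chardev, class and device created; profile A parsed */
	rc = qg_batterydata_init(profile_node_a);

	/* repeat call (e.g. a new batt_age_level): qg_battery_profile_free()
	 * drops only the parsed tables, the chardev is reused, and
	 * profile B is parsed in place
	 */
	rc = qg_batterydata_init(profile_node_b);

	/* teardown: destroys the chardev, then frees the tables */
	qg_batterydata_exit();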
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index a0c07de..b011c86 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -69,6 +69,7 @@ struct qg_dt {
 	bool			qg_sleep_config;
 	bool			qg_fast_chg_cfg;
 	bool			fvss_enable;
+	bool			multi_profile_load;
 };
 
 struct qg_esr_data {
@@ -85,10 +86,12 @@ struct qpnp_qg {
 	struct pmic_revid_data	*pmic_rev_id;
 	struct regmap		*regmap;
 	struct qpnp_vadc_chip	*vadc_dev;
+	struct soh_profile      *sp;
 	struct power_supply	*qg_psy;
 	struct class		*qg_class;
 	struct device		*qg_device;
 	struct cdev		qg_cdev;
+	struct device_node      *batt_node;
 	struct dentry		*dfs_root;
 	dev_t			dev_no;
 	struct work_struct	udata_work;
@@ -170,6 +173,7 @@ struct qpnp_qg {
 	int			sys_soc;
 	int			last_adj_ssoc;
 	int			recharge_soc;
+	int			batt_age_level;
 	struct alarm		alarm_timer;
 	u32			sdam_data[SDAM_MAX];
 
diff --git a/drivers/power/supply/qcom/qg-reg.h b/drivers/power/supply/qcom/qg-reg.h
index 6e001de..0af5993 100644
--- a/drivers/power/supply/qcom/qg-reg.h
+++ b/drivers/power/supply/qcom/qg-reg.h
@@ -121,6 +121,7 @@
 #define QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET	0x6E /* 4-byte 0x6E-0x71 */
 #define QG_SDAM_ESR_CHARGE_SF_OFFSET		0x72 /* 2-byte 0x72-0x73 */
 #define QG_SDAM_ESR_DISCHARGE_SF_OFFSET		0x74 /* 2-byte 0x74-0x75 */
+#define QG_SDAM_BATT_AGE_LEVEL_OFFSET		0x76 /* 1-byte 0x76 */
 #define QG_SDAM_MAGIC_OFFSET			0x80 /* 4-byte 0x80-0x83 */
 #define QG_SDAM_MAX_OFFSET			0xA4
 
diff --git a/drivers/power/supply/qcom/qg-sdam.c b/drivers/power/supply/qcom/qg-sdam.c
index e641d11..aa357e5 100644
--- a/drivers/power/supply/qcom/qg-sdam.c
+++ b/drivers/power/supply/qcom/qg-sdam.c
@@ -81,6 +81,11 @@ static struct qg_sdam_info sdam_info[] = {
 		.offset = QG_SDAM_ESR_DISCHARGE_SF_OFFSET,
 		.length = 2,
 	},
+	[SDAM_BATT_AGE_LEVEL] = {
+		.name   = "SDAM_BATT_AGE_LEVEL_OFFSET",
+		.offset = QG_SDAM_BATT_AGE_LEVEL_OFFSET,
+		.length = 1,
+	},
 	[SDAM_MAGIC] = {
 		.name	= "SDAM_MAGIC_OFFSET",
 		.offset = QG_SDAM_MAGIC_OFFSET,
diff --git a/drivers/power/supply/qcom/qg-sdam.h b/drivers/power/supply/qcom/qg-sdam.h
index b0e48ed..32ce6f8 100644
--- a/drivers/power/supply/qcom/qg-sdam.h
+++ b/drivers/power/supply/qcom/qg-sdam.h
@@ -24,6 +24,7 @@ enum qg_sdam_param {
 	SDAM_ESR_CHARGE_SF,
 	SDAM_ESR_DISCHARGE_SF,
 	SDAM_MAGIC,
+	SDAM_BATT_AGE_LEVEL,
 	SDAM_MAX,
 };
 
diff --git a/drivers/power/supply/qcom/qg-soc.c b/drivers/power/supply/qcom/qg-soc.c
index 690ac02..3ecd32b 100644
--- a/drivers/power/supply/qcom/qg-soc.c
+++ b/drivers/power/supply/qcom/qg-soc.c
@@ -105,7 +105,7 @@ static int qg_process_fvss_soc(struct qpnp_qg *chip, int sys_soc)
 	int soc_vbat = 0, wt_vbat = 0, wt_sys = 0, soc_fvss = 0;
 
 	if (!chip->dt.fvss_enable)
-		return 0;
+		goto exit_soc_scale;
 
 	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING)
 		goto exit_soc_scale;
@@ -178,7 +178,7 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip)
 
 	chip->sys_soc = CAP(QG_MIN_SOC, QG_MAX_SOC, chip->sys_soc);
 
-	if (chip->sys_soc == QG_MIN_SOC) {
+	if (chip->sys_soc <= 50) { /* 0.5% */
 		/* Hold SOC to 1% if VBAT has not dropped below cutoff */
 		rc = qg_get_battery_voltage(chip, &vbat_uv);
 		if (!rc && vbat_uv >= (vcutoff_uv + VBAT_LOW_HYST_UV))
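The inline comment above implies QG SOC is kept in hundredths of a percent, so the new trigger fires at or below 0.5% instead of only at QG_MIN_SOC. A minimal sketch of the hold condition under that assumption (function name hypothetical; the driver additionally requires the voltage read itself to succeed):

	static bool qg_hold_soc_at_1pct(int sys_soc, int vbat_uv, int vcutoff_uv)
	{
		/* 50 == 0.5%; pin SOC at 1% while VBAT stays above
		 * cutoff plus hysteresis
		 */
		return sys_soc <= 50 &&
		       vbat_uv >= vcutoff_uv + VBAT_LOW_HYST_UV;
	}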
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index a868f1d..4bfa42f 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -296,6 +296,7 @@ struct fg_gen4_chip {
 	int			soc_scale_msoc;
 	int			prev_soc_scale_msoc;
 	int			soc_scale_slope;
+	int			msoc_actual;
 	int			vbatt_avg;
 	int			vbatt_now;
 	int			vbatt_res;
@@ -1122,7 +1123,7 @@ static int fg_gen4_get_prop_soc_scale(struct fg_gen4_chip *chip)
 	chip->vbatt_now = DIV_ROUND_CLOSEST(chip->vbatt_now, 1000);
 	chip->vbatt_avg = DIV_ROUND_CLOSEST(chip->vbatt_avg, 1000);
 	chip->vbatt_res = chip->vbatt_avg - chip->dt.cutoff_volt_mv;
-	pr_debug("FVSS: Vbatt now=%d Vbatt avg=%d Vbatt res=%d\n",
+	fg_dbg(fg, FG_FVSS, "Vbatt now=%d Vbatt avg=%d Vbatt res=%d\n",
 		chip->vbatt_now, chip->vbatt_avg, chip->vbatt_res);
 
 	return rc;
@@ -3215,7 +3216,7 @@ static int fg_gen4_enter_soc_scale(struct fg_gen4_chip *chip)
 	}
 
 	chip->soc_scale_mode = true;
-	pr_debug("FVSS: Enter FVSS mode, SOC=%d slope=%d timer=%d\n", soc,
+	fg_dbg(fg, FG_FVSS, "Enter FVSS mode, SOC=%d slope=%d timer=%d\n", soc,
 		chip->soc_scale_slope, chip->scale_timer);
 	alarm_start_relative(&chip->soc_scale_alarm_timer,
 				ms_to_ktime(chip->scale_timer));
@@ -3245,6 +3246,8 @@ static void fg_gen4_write_scale_msoc(struct fg_gen4_chip *chip)
 
 static void fg_gen4_exit_soc_scale(struct fg_gen4_chip *chip)
 {
+	struct fg_dev *fg = &chip->fg;
+
 	if (chip->soc_scale_mode) {
 		alarm_cancel(&chip->soc_scale_alarm_timer);
 		cancel_work_sync(&chip->soc_scale_work);
@@ -3253,13 +3256,13 @@ static void fg_gen4_exit_soc_scale(struct fg_gen4_chip *chip)
 	}
 
 	chip->soc_scale_mode = false;
-	pr_debug("FVSS: Exit FVSS mode\n");
+	fg_dbg(fg, FG_FVSS, "Exit FVSS mode\n");
 }
 
 static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
 {
 	struct fg_dev *fg = &chip->fg;
-	int rc, msoc_actual;
+	int rc;
 
 	if (!chip->dt.soc_scale_mode)
 		return 0;
@@ -3270,7 +3273,7 @@ static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
 		goto fail_soc_scale;
 	}
 
-	rc = fg_get_msoc(fg, &msoc_actual);
+	rc = fg_get_msoc(fg, &chip->msoc_actual);
 	if (rc < 0) {
 		pr_err("Failed to get msoc rc=%d\n", rc);
 		goto fail_soc_scale;
@@ -3289,7 +3292,7 @@ static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
 		 * Stay in SOC scale mode until the H/W SOC catches up with
 		 * the scaled SOC while charging.
 		 */
-		if (msoc_actual >= chip->soc_scale_msoc)
+		if (chip->msoc_actual >= chip->soc_scale_msoc)
 			fg_gen4_exit_soc_scale(chip);
 	}
 
@@ -3966,8 +3969,24 @@ static void soc_scale_work(struct work_struct *work)
 	mutex_lock(&chip->soc_scale_lock);
 	soc = DIV_ROUND_CLOSEST(chip->vbatt_res,
 				chip->soc_scale_slope);
-	/* If calculated SOC is higher than current SOC, report current SOC */
-	if (soc > chip->prev_soc_scale_msoc) {
+	chip->soc_scale_msoc = soc;
+	chip->scale_timer = chip->dt.scale_timer_ms;
+
+	fg_dbg(fg, FG_FVSS, "soc: %d last soc: %d msoc_actual: %d\n", soc,
+			chip->prev_soc_scale_msoc, chip->msoc_actual);
+	if ((chip->prev_soc_scale_msoc - chip->msoc_actual) > soc_thr_percent) {
+		/*
+		 * If the difference between the previous SW-calculated SOC
+		 * and the HW SOC exceeds the SOC threshold, report the
+		 * previous SW SOC minus the threshold.
+		 */
+		chip->soc_scale_msoc = chip->prev_soc_scale_msoc -
+					soc_thr_percent;
+	} else if (soc > chip->prev_soc_scale_msoc) {
+		/*
+		 * If the calculated SOC is higher than the current SOC,
+		 * report the current SOC.
+		 */
 		chip->soc_scale_msoc = chip->prev_soc_scale_msoc;
 		chip->scale_timer = chip->dt.scale_timer_ms;
 	} else if ((chip->prev_soc_scale_msoc - soc) > soc_thr_percent) {
@@ -3982,9 +4001,6 @@ static void soc_scale_work(struct work_struct *work)
 					soc_thr_percent;
 		chip->scale_timer = chip->dt.scale_timer_ms /
 				(chip->prev_soc_scale_msoc - soc);
-	} else {
-		chip->soc_scale_msoc = soc;
-		chip->scale_timer = chip->dt.scale_timer_ms;
 	}
 
 	if (chip->soc_scale_msoc < 0)
@@ -3997,7 +4013,7 @@ static void soc_scale_work(struct work_struct *work)
 	}
 
 	chip->prev_soc_scale_msoc = chip->soc_scale_msoc;
-	pr_debug("FVSS: Calculated SOC=%d SOC reported=%d timer resolution=%d\n",
+	fg_dbg(fg, FG_FVSS, "Calculated SOC=%d SOC reported=%d timer resolution=%d\n",
 		soc, chip->soc_scale_msoc, chip->scale_timer);
 	alarm_start_relative(&chip->soc_scale_alarm_timer,
 				ms_to_ktime(chip->scale_timer));
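Stripped of locking and the scale-timer handling (the third case below also divides the timer by the SOC delta), the reporting decision soc_scale_work() makes after this patch reduces to the following sketch (function name hypothetical; variable names from the hunk, with soc_thr_percent as the per-cycle step limit):

	static int fvss_reported_soc(int soc, int prev, int msoc_actual, int thr)
	{
		if (prev - msoc_actual > thr)
			return prev - thr;	/* SW SOC ran ahead of HW SOC */
		if (soc > prev)
			return prev;		/* never report a SOC increase */
		if (prev - soc > thr)
			return prev - thr;	/* cap the per-cycle drop */
		return soc;			/* default: report as calculated */
	}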
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 3c8ec13..dd72e9b 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -91,6 +91,7 @@ static struct attribute *qg_attrs[] = {
 ATTRIBUTE_GROUPS(qg);
 
 static int qg_process_rt_fifo(struct qpnp_qg *chip);
+static int qg_load_battery_profile(struct qpnp_qg *chip);
 
 static bool is_battery_present(struct qpnp_qg *chip)
 {
@@ -1540,8 +1541,6 @@ static int qg_get_learned_capacity(void *data, int64_t *learned_cap_uah)
 	}
 	*learned_cap_uah = cc_mah * 1000;
 
-	qg_dbg(chip, QG_DEBUG_ALG_CL, "Retrieved learned capacity %llduah\n",
-					*learned_cap_uah);
 	return 0;
 }
 
@@ -1570,6 +1569,47 @@ static int qg_store_learned_capacity(void *data, int64_t learned_cap_uah)
 	return 0;
 }
 
+static int qg_get_batt_age_level(void *data, u32 *batt_age_level)
+{
+	struct qpnp_qg *chip = data;
+	int rc;
+
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->battery_missing || is_debug_batt_id(chip))
+		return -ENODEV;
+
+	*batt_age_level = 0;
+	rc = qg_sdam_read(SDAM_BATT_AGE_LEVEL, batt_age_level);
+	if (rc < 0) {
+		pr_err("Error in reading batt_age_level, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qg_store_batt_age_level(void *data, u32 batt_age_level)
+{
+	struct qpnp_qg *chip = data;
+	int rc;
+
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->battery_missing)
+		return -ENODEV;
+
+	rc = qg_sdam_write(SDAM_BATT_AGE_LEVEL, batt_age_level);
+	if (rc < 0) {
+		pr_err("Error in writing batt_age_level, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
 static int qg_get_cc_soc(void *data, int *cc_soc)
 {
 	struct qpnp_qg *chip = data;
@@ -1577,6 +1617,11 @@ static int qg_get_cc_soc(void *data, int *cc_soc)
 	if (!chip)
 		return -ENODEV;
 
+	if (is_debug_batt_id(chip) || chip->battery_missing) {
+		*cc_soc = -EINVAL;
+		return 0;
+	}
+
 	if (chip->cc_soc == INT_MIN)
 		return -EINVAL;
 
@@ -1705,6 +1750,11 @@ static int qg_get_charge_counter(struct qpnp_qg *chip, int *charge_counter)
 	int rc, cc_soc = 0;
 	int64_t temp = 0;
 
+	if (is_debug_batt_id(chip) || chip->battery_missing) {
+		*charge_counter = -EINVAL;
+		return 0;
+	}
+
 	rc = qg_get_learned_capacity(chip, &temp);
 	if (rc < 0 || !temp)
 		rc = qg_get_nominal_capacity((int *)&temp, 250, true);
@@ -1940,6 +1990,40 @@ static int qg_reset(struct qpnp_qg *chip)
 	return rc;
 }
 
+static int qg_setprop_batt_age_level(struct qpnp_qg *chip, int batt_age_level)
+{
+	int rc = 0;
+
+	if (!chip->dt.multi_profile_load)
+		return 0;
+
+	if (batt_age_level < 0) {
+		pr_err("Invalid age-level %d\n", batt_age_level);
+		return -EINVAL;
+	}
+
+	if (chip->batt_age_level == batt_age_level) {
+		qg_dbg(chip, QG_DEBUG_PROFILE, "Same age-level %d\n",
+						chip->batt_age_level);
+		return 0;
+	}
+
+	chip->batt_age_level = batt_age_level;
+	rc = qg_load_battery_profile(chip);
+	if (rc < 0) {
+		pr_err("failed to load profile\n");
+	} else {
+		rc = qg_store_batt_age_level(chip, batt_age_level);
+		if (rc < 0)
+			pr_err("Error in storing batt_age_level, rc=%d\n", rc);
+	}
+
+	qg_dbg(chip, QG_DEBUG_PROFILE, "Profile with batt_age_level = %d loaded\n",
+						chip->batt_age_level);
+
+	return rc;
+}
+
 static int qg_psy_set_property(struct power_supply *psy,
 			       enum power_supply_property psp,
 			       const union power_supply_propval *pval)
@@ -1971,6 +2055,8 @@ static int qg_psy_set_property(struct power_supply *psy,
 		chip->soh = pval->intval;
 		qg_dbg(chip, QG_DEBUG_STATUS, "SOH update: SOH=%d esr_actual=%d esr_nominal=%d\n",
 				chip->soh, chip->esr_actual, chip->esr_nominal);
+		if (chip->sp)
+			soh_profile_update(chip->sp, chip->soh);
 		break;
 	case POWER_SUPPLY_PROP_ESR_ACTUAL:
 		chip->esr_actual = pval->intval;
@@ -1981,6 +2067,9 @@ static int qg_psy_set_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_FG_RESET:
 		qg_reset(chip);
 		break;
+	case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
+		rc = qg_setprop_batt_age_level(chip, pval->intval);
+		break;
 	default:
 		break;
 	}
@@ -2110,6 +2199,9 @@ static int qg_psy_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_SCALE_MODE_EN:
 		pval->intval = chip->fvss_active;
 		break;
+	case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
+		pval->intval = chip->batt_age_level;
+		break;
 	default:
 		pr_debug("Unsupported property %d\n", psp);
 		break;
@@ -2127,6 +2219,7 @@ static int qg_property_is_writeable(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_ESR_NOMINAL:
 	case POWER_SUPPLY_PROP_SOH:
 	case POWER_SUPPLY_PROP_FG_RESET:
+	case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
 		return 1;
 	default:
 		break;
@@ -2169,6 +2262,7 @@ static enum power_supply_property qg_psy_props[] = {
 	POWER_SUPPLY_PROP_POWER_AVG,
 	POWER_SUPPLY_PROP_POWER_NOW,
 	POWER_SUPPLY_PROP_SCALE_MODE_EN,
+	POWER_SUPPLY_PROP_BATT_AGE_LEVEL,
 };
 
 static const struct power_supply_desc qg_psy_desc = {
@@ -2807,17 +2901,39 @@ static int get_batt_id_ohm(struct qpnp_qg *chip, u32 *batt_id_ohm)
 static int qg_load_battery_profile(struct qpnp_qg *chip)
 {
 	struct device_node *node = chip->dev->of_node;
-	struct device_node *batt_node, *profile_node;
-	int rc, tuple_len, len, i;
+	struct device_node *profile_node;
+	int rc, tuple_len, len, i, avail_age_level = 0;
 
-	batt_node = of_find_node_by_name(node, "qcom,battery-data");
-	if (!batt_node) {
+	chip->batt_node = of_find_node_by_name(node, "qcom,battery-data");
+	if (!chip->batt_node) {
 		pr_err("Batterydata not available\n");
 		return -ENXIO;
 	}
 
-	profile_node = of_batterydata_get_best_profile(batt_node,
+	if (chip->dt.multi_profile_load) {
+		if (chip->batt_age_level == -EINVAL) {
+			rc = qg_get_batt_age_level(chip, &chip->batt_age_level);
+			if (rc < 0) {
+				pr_err("Error in retrieving batt_age_level, rc=%d\n",
+									rc);
+				return rc;
+			}
+		}
+		profile_node = of_batterydata_get_best_aged_profile(
+					chip->batt_node,
+					chip->batt_id_ohm / 1000,
+					chip->batt_age_level,
+					&avail_age_level);
+		if (chip->batt_age_level != avail_age_level) {
+			qg_dbg(chip, QG_DEBUG_PROFILE, "Batt_age_level %d doesn't exist, using %d\n",
+					chip->batt_age_level, avail_age_level);
+			chip->batt_age_level = avail_age_level;
+		}
+	} else {
+		profile_node = of_batterydata_get_best_profile(chip->batt_node,
 				chip->batt_id_ohm / 1000, NULL);
+	}
+
 	if (IS_ERR(profile_node)) {
 		rc = PTR_ERR(profile_node);
 		pr_err("Failed to detect valid QG battery profile %d\n", rc);
@@ -3434,9 +3550,44 @@ static int qg_hw_init(struct qpnp_qg *chip)
 	return 0;
 }
 
+static int qg_soh_batt_profile_init(struct qpnp_qg *chip)
+{
+	int rc = 0;
+
+	if (!chip->dt.multi_profile_load)
+		return 0;
+
+	if (is_debug_batt_id(chip) || chip->battery_missing)
+		return 0;
+
+	if (!chip->sp)
+		chip->sp = devm_kzalloc(chip->dev, sizeof(*chip->sp),
+					GFP_KERNEL);
+	if (!chip->sp)
+		return -ENOMEM;
+
+	if (!chip->sp->initialized) {
+		chip->sp->batt_id_kohms = chip->batt_id_ohm / 1000;
+		chip->sp->bp_node = chip->batt_node;
+		chip->sp->last_batt_age_level = chip->batt_age_level;
+		chip->sp->bms_psy = chip->qg_psy;
+		rc = soh_profile_init(chip->dev, chip->sp);
+		if (rc < 0) {
+			devm_kfree(chip->dev, chip->sp);
+			chip->sp = NULL;
+		} else {
+			qg_dbg(chip, QG_DEBUG_PROFILE, "SOH profile count: %d\n",
+				chip->sp->profile_count);
+		}
+	}
+
+	return rc;
+}
+
 static int qg_post_init(struct qpnp_qg *chip)
 {
 	u8 status = 0;
+	int rc = 0;
 
 	/* disable all IRQs if profile is not loaded */
 	if (!chip->profile_loaded) {
@@ -3455,6 +3606,14 @@ static int qg_post_init(struct qpnp_qg *chip)
 	/* read STATUS2 register to clear its last state */
 	qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1);
 
+	/* SOH-based multi-profile init */
+	rc = qg_soh_batt_profile_init(chip);
+	if (rc < 0) {
+		pr_err("Failed to initialize SOH-based battery profile, rc=%d\n",
+								rc);
+		return rc;
+	}
+
 	return 0;
 }
 
@@ -4072,6 +4231,9 @@ static int qg_parse_dt(struct qpnp_qg *chip)
 			chip->dt.fvss_vbat_mv = temp;
 	}
 
+	chip->dt.multi_profile_load = of_property_read_bool(node,
+					"qcom,multi-profile-load");
+
 	qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n",
 			chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv,
 			chip->dt.delta_soc, chip->dt.qg_ext_sense);
@@ -4374,6 +4536,7 @@ static int qpnp_qg_probe(struct platform_device *pdev)
 	chip->soh = -EINVAL;
 	chip->esr_actual = -EINVAL;
 	chip->esr_nominal = -EINVAL;
+	chip->batt_age_level = -EINVAL;
 
 	qg_create_debugfs(chip);
 
@@ -4395,6 +4558,12 @@ static int qpnp_qg_probe(struct platform_device *pdev)
 		return rc;
 	}
 
+	rc = qg_sdam_init(chip->dev);
+	if (rc < 0) {
+		pr_err("Failed to initialize QG SDAM, rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = qg_setup_battery(chip);
 	if (rc < 0) {
 		pr_err("Failed to setup battery, rc=%d\n", rc);
@@ -4407,12 +4576,6 @@ static int qpnp_qg_probe(struct platform_device *pdev)
 		return rc;
 	}
 
-	rc = qg_sdam_init(chip->dev);
-	if (rc < 0) {
-		pr_err("Failed to initialize QG SDAM, rc=%d\n", rc);
-		return rc;
-	}
-
 	rc = qg_sanitize_sdam(chip);
 	if (rc < 0) {
 		pr_err("Failed to sanitize SDAM, rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 2cf5cdb..30f1787 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -6128,8 +6128,9 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 
 		/*
 		 * Remove USB's CP ILIM vote - inapplicable for wireless
-		 * parallel charging.
+		 * parallel charging. Also undo FCC STEPPER's 1.5 A vote.
 		 */
+		vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
 		if (chg->cp_ilim_votable)
 			vote(chg->cp_ilim_votable, ICL_CHANGE_VOTER, false, 0);
 
@@ -6182,6 +6183,10 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
 		vote(chg->fcc_main_votable, WLS_PL_CHARGING_VOTER, false, 0);
 
+		/* Force 1500mA FCC on WLS removal if fcc stepper is enabled */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+							true, 1500000);
 		chg->last_wls_vout = 0;
 	}
 
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 212d99d..4d851df 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -313,7 +313,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
 	export->pwm = pwm;
 	mutex_init(&export->lock);
 
-	export->child.class = parent->class;
 	export->child.release = pwm_export_release;
 	export->child.parent = parent;
 	export->child.devt = MKDEV(0, 0);
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index d859315..c996be2 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1607,9 +1607,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
 static void qcom_glink_rpdev_release(struct device *dev)
 {
 	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
-	struct glink_channel *channel = to_glink_channel(rpdev->ept);
 
-	channel->rpdev = NULL;
 	kfree(rpdev);
 }
 
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4ac4a73..4b7cc8d 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1569,13 +1569,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		rc = qdio_kick_outbound_q(q, phys_aob);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
+	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+		   state == SLSB_CU_OUTPUT_PRIMED) {
+		/* The previous buffer is not processed yet, tack on. */
+		qperf_inc(q, fast_requeue);
 	} else {
-		/* try to fast requeue buffers */
-		get_buf_state(q, prev_buf(bufnr), &state, 0);
-		if (state != SLSB_CU_OUTPUT_PRIMED)
-			rc = qdio_kick_outbound_q(q, 0);
-		else
-			qperf_inc(q, fast_requeue);
+		rc = qdio_kick_outbound_q(q, 0);
 	}
 
 	/* in case of SIGA errors we must process the error immediately */
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 70a006ba..4fe06ff 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
 				  sizeof(*pa->pa_iova_pfn) +
 				  sizeof(*pa->pa_pfn),
 				  GFP_KERNEL);
-	if (unlikely(!pa->pa_iova_pfn))
+	if (unlikely(!pa->pa_iova_pfn)) {
+		pa->pa_nr = 0;
 		return -ENOMEM;
+	}
 	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
 	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index d1154ba..9c21938 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -54,6 +54,7 @@
 #define ALUA_FAILOVER_TIMEOUT		60
 #define ALUA_FAILOVER_RETRIES		5
 #define ALUA_RTPG_DELAY_MSECS		5
+#define ALUA_RTPG_RETRY_DELAY		2
 
 /* device handler flags */
 #define ALUA_OPTIMIZE_STPG		0x01
@@ -696,7 +697,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 	case SCSI_ACCESS_STATE_TRANSITIONING:
 		if (time_before(jiffies, pg->expiry)) {
 			/* State transition, retry */
-			pg->interval = 2;
+			pg->interval = ALUA_RTPG_RETRY_DELAY;
 			err = SCSI_DH_RETRY;
 		} else {
 			struct alua_dh_data *h;
@@ -821,6 +822,8 @@ static void alua_rtpg_work(struct work_struct *work)
 				spin_lock_irqsave(&pg->lock, flags);
 				pg->flags &= ~ALUA_PG_RUNNING;
 				pg->flags |= ALUA_PG_RUN_RTPG;
+				if (!pg->interval)
+					pg->interval = ALUA_RTPG_RETRY_DELAY;
 				spin_unlock_irqrestore(&pg->lock, flags);
 				queue_delayed_work(kaluad_wq, &pg->rtpg_work,
 						   pg->interval * HZ);
@@ -832,6 +835,8 @@ static void alua_rtpg_work(struct work_struct *work)
 		spin_lock_irqsave(&pg->lock, flags);
 		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
 			pg->flags &= ~ALUA_PG_RUNNING;
+			if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+				pg->interval = ALUA_RTPG_RETRY_DELAY;
 			pg->flags |= ALUA_PG_RUN_RTPG;
 			spin_unlock_irqrestore(&pg->lock, flags);
 			queue_delayed_work(kaluad_wq, &pg->rtpg_work,
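Both alua hunks above enforce the same invariant: pg->interval must be nonzero before the RTPG work is requeued, since a SCSI_DH_RETRY outcome with a zero interval reschedules the work with no delay and retries RTPG in a tight loop. The guarded requeue they converge on:

	if (!pg->interval)
		pg->interval = ALUA_RTPG_RETRY_DELAY;	/* never requeue at 0 s */
	queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ);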
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c43eccd..f570b8c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 	case IOACCEL2_SERV_RESPONSE_COMPLETE:
 		switch (c2->error_data.status) {
 		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+			if (cmd)
+				cmd->result = 0;
 			break;
 		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
 			cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 
 	/* check for good status */
 	if (likely(c2->error_data.serv_response == 0 &&
-			c2->error_data.status == 0))
+			c2->error_data.status == 0)) {
+		cmd->result = 0;
 		return hpsa_cmd_free_and_done(h, c, cmd);
+	}
 
 	/*
 	 * Any RAID offload error results in retry which will use
@@ -5618,6 +5622,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	c = cmd_tagged_alloc(h, cmd);
 
 	/*
+	 * This is necessary because the SML doesn't zero out this field during
+	 * error recovery.
+	 */
+	cmd->result = 0;
+
+	/*
 	 * Call alternate submit routine for I/O accelerated commands.
 	 * Retries always go down the normal I/O path.
 	 */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b64ca97..71d53bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4874,8 +4874,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	ibmvfc_purge_requests(vhost, DID_ERROR);
-	ibmvfc_free_event_pool(vhost);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	ibmvfc_free_event_pool(vhost);
 
 	ibmvfc_free_mem(vhost);
 	spin_lock(&ibmvfc_driver_lock);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e0c8722..806ceab 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3025,6 +3025,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
 	u32 size;
 	unsigned long buff_addr;
 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+	unsigned long chunk_left_bytes;
 	unsigned long src_addr;
 	unsigned long flags;
 	u32 buff_offset;
@@ -3050,6 +3051,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
 	}
 
 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
 
 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
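The new chunk_left_bytes clamp keeps a crash-dump read inside the DMA chunk that buff_offset points into. A self-contained worked example (chunk and page sizes assumed for illustration only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long dmachunk = 1UL << 20;	/* assume 1 MiB chunks */
		unsigned long total = 4 * dmachunk;	/* 4-chunk crash buffer */
		unsigned long buff_offset = 2 * dmachunk - 1000; /* 1000 B before a boundary */
		unsigned long page = 4096;		/* assume 4 KiB PAGE_SIZE */
		unsigned long size, chunk_left;

		size = total - buff_offset;
		chunk_left = dmachunk - (buff_offset % dmachunk);	/* 1000 */
		size = (size > chunk_left) ? chunk_left : size;		/* new clamp: 1000 */
		size = (size >= page) ? (page - 1) : size;		/* old sole clamp: 4095 */

		/* without the new clamp, 4095 bytes were read from a
		 * 1000-byte chunk tail -- a 3095-byte overrun
		 */
		printf("copy %lu bytes\n", size);	/* 1000 */
		return 0;
	}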
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f84f9bf..ddce32f 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4732,7 +4732,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 		ql_log(ql_log_warn, vha, 0xd049,
 		    "Failed to allocate ct_sns request.\n");
 		kfree(fcport);
-		fcport = NULL;
+		return NULL;
 	}
 	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
 	INIT_LIST_HEAD(&fcport->gnl_entry);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ec0a589..a029804 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7075,8 +7075,8 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	/*
 	 * if host reset is required then skip clearing the pending
-	 * transfers forcefully because they will automatically get
-	 * cleared after link startup.
+	 * transfers forcefully because they will get cleared during
+	 * host reset and restore.
 	 */
 	if (needs_reset)
 		goto skip_pending_xfer_clear;
@@ -7647,7 +7647,6 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
-	ufshcd_print_cmd_log(hba);
 	lrbp = &hba->lrb[tag];
 	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
 	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -7891,9 +7890,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	int err;
 	unsigned long flags;
 
-	/* Reset the host controller */
+	/*
+	 * Stop the host controller and complete the requests
+	 * cleared by h/w.
+	 */
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_hba_stop(hba, false);
+	hba->silence_err_logs = true;
+	ufshcd_complete_requests(hba);
+	hba->silence_err_logs = false;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* scale up clocks to max frequency before full reinitialization */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 1c5acda..473e29d 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -207,6 +207,8 @@
 	  status indication and disables flows while grant size is reached.
 	  If unsure, or if not using burst mode flow control, say 'N'.
 
+source "drivers/soc/qcom/rmnet_ctl/Kconfig"
+
 config QCOM_QMI_POWER_COLLAPSE
 	bool "Enable power save features"
 	depends on QCOM_QMI_RMNET
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 64024f4..ef36b9c 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -8,7 +8,7 @@
 obj-$(CONFIG_QCOM_QMI_HELPERS)	+= qmi_helpers.o
 qmi_helpers-y	+= qmi_encdec.o qmi_interface.o
 obj-$(CONFIG_QCOM_QMI_RMNET)	+= qmi_rmnet.o
-obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o
+obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o dfc_qmap.o
 obj-$(CONFIG_QCOM_QMI_POWER_COLLAPSE) += wda_qmi.o
 obj-$(CONFIG_QCOM_RMTFS_MEM)	+= rmtfs_mem.o
 obj-$(CONFIG_QCOM_RPMH)		+= qcom_rpmh.o
@@ -86,3 +86,4 @@
 obj-$(CONFIG_QCOM_CDSP_RM) += cdsprm.o
 obj-$(CONFIG_ICNSS) += icnss.o
 obj-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
+obj-$(CONFIG_RMNET_CTL) += rmnet_ctl/
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index d1241fe5..eb0c41c 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -564,13 +564,15 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
 		return -EINVAL;
 
 	if (drvdata->enable[curr_list]) {
-		dev_err(drvdata->dev, "DCC is already enabled\n");
+		dev_err(drvdata->dev, "List %d is already enabled\n",
+				curr_list);
 		return -EINVAL;
 	}
 
 	lock_reg = dcc_readl(drvdata, DCC_LL_LOCK(curr_list));
 	if (lock_reg & 0x1) {
-		dev_err(drvdata->dev, "DCC is already enabled\n");
+		dev_err(drvdata->dev, "List %d is already locked\n",
+				curr_list);
 		return -EINVAL;
 	}
 
@@ -578,6 +580,21 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
 	return 0;
 }
 
+static bool is_dcc_enabled(struct dcc_drvdata *drvdata)
+{
+	bool dcc_enable = false;
+	int list;
+
+	for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
+		if (drvdata->enable[list]) {
+			dcc_enable = true;
+			break;
+		}
+	}
+
+	return dcc_enable;
+}
+
 static int dcc_enable(struct dcc_drvdata *drvdata)
 {
 	int ret = 0;
@@ -586,7 +603,9 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
 
 	mutex_lock(&drvdata->mutex);
 
-	memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
+	if (!is_dcc_enabled(drvdata))
+		memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
 
 	for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
 
@@ -667,21 +686,6 @@ static void dcc_disable(struct dcc_drvdata *drvdata)
 	mutex_unlock(&drvdata->mutex);
 }
 
-static bool is_dcc_enabled(struct dcc_drvdata *drvdata)
-{
-	bool dcc_enable = false;
-	int list;
-
-	for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
-		if (drvdata->enable[list]) {
-			dcc_enable = true;
-			break;
-		}
-	}
-
-	return dcc_enable;
-}
-
 static ssize_t curr_list_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
diff --git a/drivers/soc/qcom/dfc_defs.h b/drivers/soc/qcom/dfc_defs.h
new file mode 100644
index 0000000..7553707
--- /dev/null
+++ b/drivers/soc/qcom/dfc_defs.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DFC_DEFS_H
+#define _DFC_DEFS_H
+
+#include <linux/soc/qcom/qmi.h>
+#include "qmi_rmnet_i.h"
+
+#define DFC_ACK_TYPE_DISABLE 1
+#define DFC_ACK_TYPE_THRESHOLD 2
+
+#define DFC_MASK_TCP_BIDIR 0x1
+#define DFC_MASK_RAT_SWITCH 0x2
+#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
+#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
+
+#define DFC_MAX_QOS_ID_V01 2
+
+struct dfc_qmi_data {
+	void *rmnet_port;
+	struct workqueue_struct *dfc_wq;
+	struct work_struct svc_arrive;
+	struct qmi_handle handle;
+	struct sockaddr_qrtr ssctl;
+	struct svc_info svc;
+	struct work_struct qmi_ind_work;
+	struct list_head qmi_ind_q;
+	spinlock_t qmi_ind_lock;
+	int index;
+	int restart_state;
+};
+
+enum dfc_ip_type_enum_v01 {
+	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	DFC_IPV4_TYPE_V01 = 0x4,
+	DFC_IPV6_TYPE_V01 = 0x6,
+	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+struct dfc_qos_id_type_v01 {
+	u32 qos_id;
+	enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_flow_status_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	u32 num_bytes;
+	u16 seq_num;
+	u8 qos_ids_len;
+	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
+};
+
+struct dfc_ancillary_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	u32 reserved;
+};
+
+struct dfc_flow_status_ind_msg_v01 {
+	u8 flow_status_valid;
+	u8 flow_status_len;
+	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+	u8 eod_ack_reqd_valid;
+	u8 eod_ack_reqd;
+	u8 ancillary_info_valid;
+	u8 ancillary_info_len;
+	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_bearer_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_tx_link_status_ind_msg_v01 {
+	u8 tx_status;
+	u8 bearer_info_valid;
+	u8 bearer_info_len;
+	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
+};
+
+void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+			       struct dfc_flow_status_ind_msg_v01 *ind);
+
+void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+				   struct dfc_tx_link_status_ind_msg_v01 *ind);
+
+#endif /* _DFC_DEFS_H */
diff --git a/drivers/soc/qcom/dfc_qmap.c b/drivers/soc/qcom/dfc_qmap.c
new file mode 100644
index 0000000..a4b2095
--- /dev/null
+++ b/drivers/soc/qcom/dfc_qmap.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <net/pkt_sched.h>
+#include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
+#include <trace/events/dfc.h>
+#include <soc/qcom/rmnet_ctl.h>
+#include "dfc_defs.h"
+
+#define QMAP_DFC_VER		1
+
+#define QMAP_CMD_DONE		-1
+
+#define QMAP_CMD_REQUEST	0
+#define QMAP_CMD_ACK		1
+#define QMAP_CMD_UNSUPPORTED	2
+#define QMAP_CMD_INVALID	3
+
+#define QMAP_DFC_CONFIG		10
+#define QMAP_DFC_IND		11
+#define QMAP_DFC_QUERY		12
+#define QMAP_DFC_END_MARKER	13
+
+struct qmap_hdr {
+	u8	cd_pad;
+	u8	mux_id;
+	__be16	pkt_len;
+} __aligned(1);
+
+#define QMAP_HDR_LEN sizeof(struct qmap_hdr)
+
+struct qmap_cmd_hdr {
+	u8	pad_len:6;
+	u8	reserved_bit:1;
+	u8	cd_bit:1;
+	u8	mux_id;
+	__be16	pkt_len;
+	u8	cmd_name;
+	u8	cmd_type:2;
+	u8	reserved:6;
+	u16	reserved2;
+	__be32	tx_id;
+} __aligned(1);
+
+struct qmap_dfc_config {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			cmd_id;
+	u8			reserved;
+	u8			tx_info:1;
+	u8			reserved2:7;
+	__be32			ep_type;
+	__be32			iface_id;
+	u32			reserved3;
+} __aligned(1);
+
+struct qmap_dfc_ind {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	__be16			seq_num;
+	u8			reserved2;
+	u8			tx_info_valid:1;
+	u8			tx_info:1;
+	u8			reserved3:6;
+	u8			bearer_id;
+	u8			tcp_bidir:1;
+	u8			bearer_status:3;
+	u8			reserved4:4;
+	__be32			grant;
+	u32			reserved5;
+	u32			reserved6;
+} __aligned(1);
+
+struct qmap_dfc_query {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u32			reserved3;
+} __aligned(1);
+
+struct qmap_dfc_query_resp {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			bearer_id;
+	u8			tcp_bidir:1;
+	u8			reserved:7;
+	u8			invalid:1;
+	u8			reserved2:7;
+	__be32			grant;
+	u32			reserved3;
+	u32			reserved4;
+} __aligned(1);
+
+struct qmap_dfc_end_marker_req {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u16			reserved3;
+	__be16			seq_num;
+	u32			reserved4;
+} __aligned(1);
+
+struct qmap_dfc_end_marker_cnf {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u16			reserved3;
+	__be16			seq_num;
+	u32			reserved4;
+} __aligned(1);
+
+static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
+static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
+static struct dfc_qmi_data __rcu *qmap_dfc_data;
+static atomic_t qmap_txid;
+static void *rmnet_ctl_handle;
+
+static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
+					 u8 bearer_id, u16 seq, u32 tx_id);
+
+static void dfc_qmap_send_cmd(struct sk_buff *skb)
+{
+	trace_dfc_qmap(skb->data, skb->len, false);
+
+	if (rmnet_ctl_send_client(rmnet_ctl_handle, skb)) {
+		pr_err("Failed to send to rmnet ctl\n");
+		kfree_skb(skb);
+	}
+}
+
+static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
+				     struct sk_buff *skb)
+{
+	skb->protocol = htons(ETH_P_MAP);
+	skb->dev = rmnet_get_real_dev(dfc->rmnet_port);
+
+	trace_dfc_qmap(skb->data, skb->len, false);
+	dev_queue_xmit(skb);
+}
+
+static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
+			       struct sk_buff *skb)
+{
+	struct qmap_dfc_ind *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_ind))
+		return QMAP_CMD_INVALID;
+
+	cmd = (struct qmap_dfc_ind *)skb->data;
+
+	if (cmd->tx_info_valid) {
+		memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
+		qmap_tx_ind.tx_status = cmd->tx_info;
+		qmap_tx_ind.bearer_info_valid = 1;
+		qmap_tx_ind.bearer_info_len = 1;
+		qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;
+
+		dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);
+
+		/* Ignore grant since it is always 0 */
+		goto done;
+	}
+
+	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
+	qmap_flow_ind.flow_status_valid = 1;
+	qmap_flow_ind.flow_status_len = 1;
+	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
+	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
+	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
+	qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);
+
+	if (cmd->tcp_bidir) {
+		qmap_flow_ind.ancillary_info_valid = 1;
+		qmap_flow_ind.ancillary_info_len = 1;
+		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
+		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
+	}
+
+	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
+
+done:
+	return QMAP_CMD_ACK;
+}
+
+static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
+				      struct sk_buff *skb)
+{
+	struct qmap_dfc_query_resp *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_query_resp))
+		return QMAP_CMD_DONE;
+
+	cmd = (struct qmap_dfc_query_resp *)skb->data;
+
+	if (cmd->invalid)
+		return QMAP_CMD_DONE;
+
+	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
+	qmap_flow_ind.flow_status_valid = 1;
+	qmap_flow_ind.flow_status_len = 1;
+
+	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
+	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
+	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
+	qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;
+
+	if (cmd->tcp_bidir) {
+		qmap_flow_ind.ancillary_info_valid = 1;
+		qmap_flow_ind.ancillary_info_len = 1;
+		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
+		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
+	}
+
+	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
+
+	return QMAP_CMD_DONE;
+}
+
+static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
+				    u8 bearer_id, u16 seq_num, u32 tx_id)
+{
+	struct net_device *dev;
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+
+	dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
+	if (!dev)
+		return;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos)
+		return;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+
+	if (bearer && bearer->last_seq == seq_num && bearer->grant_size) {
+		bearer->ack_req = 1;
+		bearer->ack_txid = tx_id;
+	} else {
+		dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id);
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+}
+
+static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
+					  struct sk_buff *skb)
+{
+	struct qmap_dfc_end_marker_req *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
+		return QMAP_CMD_INVALID;
+
+	cmd = (struct qmap_dfc_end_marker_req *)skb->data;
+
+	dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
+				ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id));
+
+	return QMAP_CMD_DONE;
+}
+
+static void dfc_qmap_cmd_handler(struct sk_buff *skb)
+{
+	struct qmap_cmd_hdr *cmd;
+	struct dfc_qmi_data *dfc;
+	int rc = QMAP_CMD_DONE;
+
+	if (!skb)
+		return;
+
+	trace_dfc_qmap(skb->data, skb->len, true);
+
+	if (skb->len < sizeof(struct qmap_cmd_hdr))
+		goto free_skb;
+
+	cmd = (struct qmap_cmd_hdr *)skb->data;
+	if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
+		goto free_skb;
+
+	if (cmd->cmd_name == QMAP_DFC_QUERY) {
+		if (cmd->cmd_type != QMAP_CMD_ACK)
+			goto free_skb;
+	} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
+		goto free_skb;
+	}
+
+	rcu_read_lock();
+
+	dfc = rcu_dereference(qmap_dfc_data);
+	if (!dfc || READ_ONCE(dfc->restart_state)) {
+		rcu_read_unlock();
+		goto free_skb;
+	}
+
+	switch (cmd->cmd_name) {
+	case QMAP_DFC_IND:
+		rc = dfc_qmap_handle_ind(dfc, skb);
+		qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
+		break;
+
+	case QMAP_DFC_QUERY:
+		rc = dfc_qmap_handle_query_resp(dfc, skb);
+		break;
+
+	case QMAP_DFC_END_MARKER:
+		rc = dfc_qmap_handle_end_marker_req(dfc, skb);
+		break;
+
+	default:
+		rc = QMAP_CMD_UNSUPPORTED;
+	}
+
+	/* Send ack */
+	if (rc != QMAP_CMD_DONE) {
+		cmd->cmd_type = rc;
+		if (cmd->cmd_name == QMAP_DFC_IND)
+			dfc_qmap_send_inband_ack(dfc, skb);
+		else
+			dfc_qmap_send_cmd(skb);
+
+		rcu_read_unlock();
+		return;
+	}
+
+	rcu_read_unlock();
+
+free_skb:
+	kfree_skb(skb);
+}
+
+static void dfc_qmap_send_config(struct dfc_qmi_data *data)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_config *dfc_config;
+	unsigned int len = sizeof(struct qmap_dfc_config);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+	dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
+	memset(dfc_config, 0, len);
+
+	dfc_config->hdr.cd_bit = 1;
+	dfc_config->hdr.mux_id = 0;
+	dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
+	dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
+	dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
+
+	dfc_config->cmd_ver = QMAP_DFC_VER;
+	dfc_config->cmd_id = QMAP_DFC_IND;
+	dfc_config->tx_info = 1;
+	dfc_config->ep_type = htonl(data->svc.ep_type);
+	dfc_config->iface_id = htonl(data->svc.iface_id);
+
+	dfc_qmap_send_cmd(skb);
+}
+
+static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_query *dfc_query;
+	unsigned int len = sizeof(struct qmap_dfc_query);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+	dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
+	memset(dfc_query, 0, len);
+
+	dfc_query->hdr.cd_bit = 1;
+	dfc_query->hdr.mux_id = mux_id;
+	dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
+	dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
+	dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
+
+	dfc_query->cmd_ver = QMAP_DFC_VER;
+	dfc_query->bearer_id = bearer_id;
+
+	dfc_qmap_send_cmd(skb);
+}
+
+static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
+					 u8 bearer_id, u16 seq, u32 tx_id)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_end_marker_cnf *em_cnf;
+	unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
+	memset(em_cnf, 0, len);
+
+	em_cnf->hdr.cd_bit = 1;
+	em_cnf->hdr.mux_id = qos->mux_id;
+	em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
+	em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
+	em_cnf->hdr.tx_id = htonl(tx_id);
+
+	em_cnf->cmd_ver = QMAP_DFC_VER;
+	em_cnf->bearer_id = bearer_id;
+	em_cnf->seq_num = htons(seq);
+
+	skb->protocol = htons(ETH_P_MAP);
+	skb->dev = qos->real_dev;
+
+	/* This cmd needs to be sent in-band */
+	trace_dfc_qmap(skb->data, skb->len, false);
+	rmnet_map_tx_qmap_cmd(skb);
+}
+
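+/* Entry point for the DFC core: confirm the end marker on a disable
+ * ack, or query the grant when a threshold ack is requested.
+ */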
+void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
+{
+	struct rmnet_bearer_map *bearer;
+
+	if (type == DFC_ACK_TYPE_DISABLE) {
+		bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+		if (bearer)
+			dfc_qmap_send_end_marker_cnf(qos, bearer_id,
+						     seq, bearer->ack_txid);
+	} else if (type == DFC_ACK_TYPE_THRESHOLD) {
+		dfc_qmap_send_query(qos->mux_id, bearer_id);
+	}
+}
+
+static struct rmnet_ctl_client_hooks cb = {
+	.ctl_dl_client_hook = dfc_qmap_cmd_handler,
+};
+
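+/* Set up the QMAP DFC client: publish the RCU-protected state,
+ * register with rmnet_ctl and send the initial config request.
+ */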
+int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+			 struct qmi_info *qmi)
+{
+	struct dfc_qmi_data *data;
+
+	if (!port || !qmi)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->rmnet_port = port;
+	data->index = index;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
+
+	qmi->dfc_clients[index] = (void *)data;
+	rcu_assign_pointer(qmap_dfc_data, data);
+
+	atomic_set(&qmap_txid, 0);
+
+	rmnet_ctl_handle = rmnet_ctl_register_client(&cb);
+	if (!rmnet_ctl_handle)
+		pr_err("Failed to register with rmnet ctl\n");
+
+	trace_dfc_client_state_up(data->index, data->svc.instance,
+				  data->svc.ep_type, data->svc.iface_id);
+
+	pr_info("DFC QMAP init\n");
+
+	dfc_qmap_send_config(data);
+
+	return 0;
+}
+
+void dfc_qmap_client_exit(void *dfc_data)
+{
+	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+
+	if (!data) {
+		pr_err("%s() data is null\n", __func__);
+		return;
+	}
+
+	trace_dfc_client_state_down(data->index, 0);
+
+	rmnet_ctl_unregister_client(rmnet_ctl_handle);
+
+	WRITE_ONCE(data->restart_state, 1);
+	RCU_INIT_POINTER(qmap_dfc_data, NULL);
+	synchronize_rcu();
+
+	kfree(data);
+
+	pr_info("DFC QMAP exit\n");
+}
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index 05a491c..f175881 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -3,26 +3,14 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/rtnetlink.h>
 #include <net/pkt_sched.h>
-#include <linux/soc/qcom/qmi.h>
 #include <soc/qcom/rmnet_qmi.h>
 #include <soc/qcom/qmi_rmnet.h>
+#include "dfc_defs.h"
 
-#include "qmi_rmnet_i.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/dfc.h>
 
-#define DFC_MASK_TCP_BIDIR 0x1
-#define DFC_MASK_RAT_SWITCH 0x2
-#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
-#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
-
-#define DFC_MAX_QOS_ID_V01 2
-
-#define DFC_ACK_TYPE_DISABLE 1
-#define DFC_ACK_TYPE_THRESHOLD 2
-
 struct dfc_qmap_header {
 	u8  pad_len:6;
 	u8  reserved_bit:1;
@@ -47,20 +35,6 @@ struct dfc_ack_cmd {
 	u8  bearer_id;
 } __aligned(1);
 
-struct dfc_qmi_data {
-	void *rmnet_port;
-	struct workqueue_struct *dfc_wq;
-	struct work_struct svc_arrive;
-	struct qmi_handle handle;
-	struct sockaddr_qrtr ssctl;
-	struct svc_info svc;
-	struct work_struct qmi_ind_work;
-	struct list_head qmi_ind_q;
-	spinlock_t qmi_ind_lock;
-	int index;
-	int restart_state;
-};
-
 static void dfc_svc_init(struct work_struct *work);
 
 /* **************************************************** */
@@ -106,28 +80,6 @@ struct dfc_indication_register_resp_msg_v01 {
 	struct qmi_response_type_v01 resp;
 };
 
-enum dfc_ip_type_enum_v01 {
-	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
-	DFC_IPV4_TYPE_V01 = 0x4,
-	DFC_IPV6_TYPE_V01 = 0x6,
-	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
-};
-
-struct dfc_qos_id_type_v01 {
-	u32 qos_id;
-	enum dfc_ip_type_enum_v01 ip_type;
-};
-
-struct dfc_flow_status_info_type_v01 {
-	u8 subs_id;
-	u8 mux_id;
-	u8 bearer_id;
-	u32 num_bytes;
-	u16 seq_num;
-	u8 qos_ids_len;
-	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
-};
-
 static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
 	{
 		.data_type	= QMI_UNSIGNED_4_BYTE,
@@ -241,13 +193,6 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
 	},
 };
 
-struct dfc_ancillary_info_type_v01 {
-	u8 subs_id;
-	u8 mux_id;
-	u8 bearer_id;
-	u32 reserved;
-};
-
 static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
 	{
 		.data_type	= QMI_UNSIGNED_1_BYTE,
@@ -300,31 +245,6 @@ static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
 	},
 };
 
-struct dfc_flow_status_ind_msg_v01 {
-	u8 flow_status_valid;
-	u8 flow_status_len;
-	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
-	u8 eod_ack_reqd_valid;
-	u8 eod_ack_reqd;
-	u8 ancillary_info_valid;
-	u8 ancillary_info_len;
-	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
-};
-
-struct dfc_bearer_info_type_v01 {
-	u8 subs_id;
-	u8 mux_id;
-	u8 bearer_id;
-	enum dfc_ip_type_enum_v01 ip_type;
-};
-
-struct dfc_tx_link_status_ind_msg_v01 {
-	u8 tx_status;
-	u8 bearer_info_valid;
-	u8 bearer_info_len;
-	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
-};
-
 struct dfc_get_flow_status_req_msg_v01 {
 	u8 bearer_id_list_valid;
 	u8 bearer_id_list_len;
@@ -954,6 +874,11 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
 	if (!qos)
 		return;
 
+	if (dfc_qmap) {
+		dfc_qmap_send_ack(qos, bearer_id, seq, type);
+		return;
+	}
+
 	skb = alloc_skb(data_size, GFP_ATOMIC);
 	if (!skb)
 		return;
@@ -1083,6 +1008,11 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 		    (itm->grant_size > 0 && fc_info->num_bytes == 0))
 			action = true;
 
+		/* qmap requires an ack when the ack request is being cleared */
+		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
+			dfc_qmap_send_ack(qos, itm->bearer_id,
+					  itm->seq, DFC_ACK_TYPE_DISABLE);
+
 		itm->grant_size = fc_info->num_bytes;
 		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
 		itm->seq = fc_info->seq_num;
@@ -1099,10 +1029,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 	return rc;
 }
 
-static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
-				      struct dfc_svc_ind *svc_ind)
+void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+			       struct dfc_flow_status_ind_msg_v01 *ind)
 {
-	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->d.dfc_info;
 	struct net_device *dev;
 	struct qos_info *qos;
 	struct dfc_flow_status_info_type_v01 *flow_status;
@@ -1176,13 +1105,17 @@ static void dfc_update_tx_link_status(struct net_device *dev,
 	if (!itm)
 		return;
 
+	/* If no change in tx status, ignore */
+	if (itm->tx_off == !tx_status)
+		return;
+
 	if (itm->grant_size && !tx_status) {
 		itm->grant_size = 0;
 		itm->tcp_bidir = false;
 		dfc_bearer_flow_ctl(dev, itm, qos);
 	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
 		itm->grant_size = DEFAULT_GRANT;
-		itm->grant_thresh = DEFAULT_GRANT;
+		itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
 		itm->seq = 0;
 		itm->ack_req = 0;
 		dfc_bearer_flow_ctl(dev, itm, qos);
@@ -1191,10 +1124,9 @@ static void dfc_update_tx_link_status(struct net_device *dev,
 	itm->tx_off = !tx_status;
 }
 
-static void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
-					  struct dfc_svc_ind *svc_ind)
+void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+				   struct dfc_tx_link_status_ind_msg_v01 *ind)
 {
-	struct dfc_tx_link_status_ind_msg_v01 *ind = &svc_ind->d.tx_status;
 	struct net_device *dev;
 	struct qos_info *qos;
 	struct dfc_bearer_info_type_v01 *bearer_info;
@@ -1256,10 +1188,12 @@ static void dfc_qmi_ind_work(struct work_struct *work)
 
 		if (!dfc->restart_state) {
 			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
-				dfc_do_burst_flow_control(dfc, svc_ind);
+				dfc_do_burst_flow_control(
+						dfc, &svc_ind->d.dfc_info);
 			else if (svc_ind->msg_id ==
 					QMI_DFC_TX_LINK_STATUS_IND_V01)
-				dfc_handle_tx_link_status_ind(dfc, svc_ind);
+				dfc_handle_tx_link_status_ind(
+						dfc, &svc_ind->d.tx_status);
 		}
 		kfree(svc_ind);
 	} while (1);
@@ -1583,7 +1517,7 @@ void dfc_qmi_query_flow(void *dfc_data)
 	svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
 	memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
 		sizeof(resp->flow_status[0]) * resp->flow_status_len);
-	dfc_do_burst_flow_control(data, svc_ind);
+	dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info);
 
 done:
 	kfree(svc_ind);
diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c
index 721d80d..678efeb 100644
--- a/drivers/soc/qcom/glink_probe.c
+++ b/drivers/soc/qcom/glink_probe.c
@@ -187,6 +187,7 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
 
 		nb->nb.notifier_call = glink_ssr_ssr_cb;
 		nb->nb.priority = GLINK_SSR_PRIORITY;
+		nb->ssr = ssr;
 
 		handle = subsys_notif_register_notifier(nb->ssr_label, &nb->nb);
 		if (IS_ERR_OR_NULL(handle)) {
@@ -195,7 +196,6 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
 			continue;
 		}
 
-		nb->ssr = ssr;
 		nb->ssr_register_handle = handle;
 		list_add_tail(&nb->list, &ssr->notify_list);
 	}
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 91c99f2..46732bf 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -23,11 +23,15 @@
 
 #define FLAG_DFC_MASK 0x000F
 #define FLAG_POWERSAVE_MASK 0x0010
+#define FLAG_QMAP_MASK 0x0020
+
 #define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
 #define DFC_SUPPORTED_MODE(m) \
 	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)
+#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
 
 int dfc_mode;
+int dfc_qmap;
 #define IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
 
 unsigned int rmnet_wq_frequency __read_mostly = 1000;
@@ -82,7 +86,7 @@ void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
 {
 	int i;
 
-	if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
+	if (!qmi)
 		return NULL;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -379,18 +383,12 @@ static void qmi_rmnet_query_flows(struct qmi_info *qmi)
 	int i;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
-		if (qmi->dfc_clients[i])
+		if (qmi->dfc_clients[i] && !dfc_qmap)
 			dfc_qmi_query_flow(qmi->dfc_clients[i]);
 	}
 }
 
 #else
-static inline void
-qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
-			   struct rmnet_flow_map *itm, int add_flow)
-{
-}
-
 static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
 {
 }
@@ -423,7 +421,7 @@ static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
 static int
 qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 {
-	int idx, rc, err = 0;
+	int idx, err = 0;
 	struct svc_info svc;
 
 	ASSERT_RTNL();
@@ -447,18 +445,17 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 	svc.ep_type = tcm->tcm_info;
 	svc.iface_id = tcm->tcm_parent;
 
-	if (DFC_SUPPORTED_MODE(FLAG_TO_MODE(tcm->tcm_ifindex)) &&
+	if (DFC_SUPPORTED_MODE(dfc_mode) &&
 	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
-		rc = dfc_qmi_client_init(port, idx, &svc, qmi);
-		if (rc < 0)
-			err = rc;
+		if (dfc_qmap)
+			err = dfc_qmap_client_init(port, idx, &svc, qmi);
+		else
+			err = dfc_qmi_client_init(port, idx, &svc, qmi);
 	}
 
 	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
 	    (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
-		rc = wda_qmi_client_init(port, &svc, qmi);
-		if (rc < 0)
-			err = rc;
+		err = wda_qmi_client_init(port, &svc, qmi);
 	}
 
 	return err;
@@ -477,7 +474,10 @@ __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
 		data = qmi->dfc_pending[idx];
 
 	if (data) {
-		dfc_qmi_client_exit(data);
+		if (dfc_qmap)
+			dfc_qmap_client_exit(data);
+		else
+			dfc_qmi_client_exit(data);
 		qmi->dfc_clients[idx] = NULL;
 		qmi->dfc_pending[idx] = NULL;
 	}
@@ -524,20 +524,22 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
 
 	switch (tcm->tcm_family) {
 	case NLMSG_FLOW_ACTIVATE:
-		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)) ||
+		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
 		    !qmi_rmnet_has_dfc_client(qmi))
 			return;
 
 		qmi_rmnet_add_flow(dev, tcm, qmi);
 		break;
 	case NLMSG_FLOW_DEACTIVATE:
-		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
+		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
 			return;
 
 		qmi_rmnet_del_flow(dev, tcm, qmi);
 		break;
 	case NLMSG_CLIENT_SETUP:
 		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
+		dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);
+
 		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
 		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
 			return;
@@ -628,7 +630,7 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
 			continue;
 		do_wake = !bearer->grant_size;
 		bearer->grant_size = DEFAULT_GRANT;
-		bearer->grant_thresh = DEFAULT_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
 		bearer->seq = 0;
 		bearer->ack_req = 0;
 		bearer->tcp_bidir = false;
@@ -795,7 +797,7 @@ void qmi_rmnet_ps_on_notify(void *port)
 {
 	struct qmi_rmnet_ps_ind *tmp;
 
-	list_for_each_entry(tmp, &ps_list, list)
+	list_for_each_entry_rcu(tmp, &ps_list, list)
 		tmp->ps_on_handler(port);
 }
 EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);
@@ -804,8 +806,9 @@ void qmi_rmnet_ps_off_notify(void *port)
 {
 	struct qmi_rmnet_ps_ind *tmp;
 
-	list_for_each_entry(tmp, &ps_list, list)
+	list_for_each_entry_rcu(tmp, &ps_list, list)
 		tmp->ps_off_handler(port);
 }
 EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);
 
@@ -831,13 +834,12 @@ int qmi_rmnet_ps_ind_deregister(void *port,
 	if (!port || !ps_ind)
 		return -EINVAL;
 
-	list_for_each_entry(tmp, &ps_list, list) {
+	list_for_each_entry_rcu(tmp, &ps_list, list) {
 		if (tmp == ps_ind) {
 			list_del_rcu(&ps_ind->list);
 			goto done;
 		}
 	}
-
 done:
 	return 0;
 }
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 1466822..15dee7c 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -9,9 +9,6 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 
-#define IP_VER_4 4
-#define IP_VER_6 6
-
 #define MAX_MQ_NUM 10
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
@@ -21,6 +18,7 @@
 #define DFC_MODE_FLOW_ID 2
 #define DFC_MODE_MQ_NUM 3
 extern int dfc_mode;
+extern int dfc_qmap;
 
 struct rmnet_bearer_map {
 	struct list_head list;
@@ -35,6 +33,7 @@ struct rmnet_bearer_map {
 	bool tcp_bidir;
 	bool rat_switch;
 	bool tx_off;
+	u32 ack_txid;
 };
 
 struct rmnet_flow_map {
@@ -125,6 +124,13 @@ void dfc_qmi_query_flow(void *dfc_data);
 int dfc_bearer_flow_ctl(struct net_device *dev,
 			struct rmnet_bearer_map *bearer,
 			struct qos_info *qos);
+
+int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+			 struct qmi_info *qmi);
+
+void dfc_qmap_client_exit(void *dfc_data);
+
+void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);
 #else
 static inline struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -150,17 +156,6 @@ static inline void dfc_qmi_client_exit(void *dfc_data)
 {
 }
 
-static inline void
-dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
-		    int ip_type, u32 mark, unsigned int len)
-{
-}
-
-static inline void
-dfc_qmi_query_flow(void *dfc_data)
-{
-}
-
 static inline int
 dfc_bearer_flow_ctl(struct net_device *dev,
 		    struct rmnet_bearer_map *bearer,
@@ -168,6 +163,17 @@ dfc_bearer_flow_ctl(struct net_device *dev,
 {
 	return 0;
 }
+
+static inline int
+dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+		     struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline void dfc_qmap_client_exit(void *dfc_data)
+{
+}
 #endif
 
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
diff --git a/drivers/soc/qcom/rmnet_ctl/Kconfig b/drivers/soc/qcom/rmnet_ctl/Kconfig
new file mode 100644
index 0000000..bfb91fbd
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RMNET CTL driver
+#
+
+menuconfig RMNET_CTL
+	tristate "RmNet Control driver"
+	depends on MHI_BUS
+	help
+	  Enable the RMNET CTL module, which is used for communicating with
+	  the device via the MAP command protocol. This module receives QMAP
+	  control commands via MHI.
diff --git a/drivers/soc/qcom/rmnet_ctl/Makefile b/drivers/soc/qcom/rmnet_ctl/Makefile
new file mode 100644
index 0000000..bf798da
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RMNET CTL module
+#
+
+rmnet_ctl-y		 += rmnet_ctl_client.o
+rmnet_ctl-y		 += rmnet_ctl_mhi.o
+obj-$(CONFIG_RMNET_CTL) += rmnet_ctl.o
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
new file mode 100644
index 0000000..299b301
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL client handlers
+ *
+ */
+
+#include <soc/qcom/rmnet_ctl.h>
+#include "rmnet_ctl_client.h"
+
+struct rmnet_ctl_client {
+	struct rmnet_ctl_client_hooks hooks;
+};
+
+struct rmnet_ctl_endpoint {
+	struct rmnet_ctl_dev __rcu *dev;
+	struct rmnet_ctl_client __rcu *client;
+};
+
+static DEFINE_SPINLOCK(client_lock);
+static struct rmnet_ctl_endpoint ctl_ep;
+
+void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev)
+{
+	rcu_assign_pointer(ctl_ep.dev, dev);
+}
+
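+/* Called by the transport for every received control packet. The data
+ * is copied into an skb and handed to the client under rcu_read_lock.
+ */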
+void rmnet_ctl_endpoint_post(const void *data, size_t len)
+{
+	struct rmnet_ctl_client *client;
+	struct sk_buff *skb;
+
+	if (unlikely(!data || !len))
+		return;
+
+	rcu_read_lock();
+
+	client = rcu_dereference(ctl_ep.client);
+
+	if (client && client->hooks.ctl_dl_client_hook) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (skb) {
+			skb_put_data(skb, data, len);
+			skb->protocol = htons(ETH_P_MAP);
+			client->hooks.ctl_dl_client_hook(skb);
+		}
+	}
+
+	rcu_read_unlock();
+}
+
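+/* Register the single supported client and return an opaque handle,
+ * or NULL if a client is already registered or allocation fails.
+ */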
+void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook)
+{
+	struct rmnet_ctl_client *client;
+
+	if (!hook)
+		return NULL;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+	client->hooks = *hook;
+
+	spin_lock(&client_lock);
+
+	/* Only support one client for now */
+	if (rcu_dereference(ctl_ep.client)) {
+		spin_unlock(&client_lock);
+		kfree(client);
+		return NULL;
+	}
+
+	rcu_assign_pointer(ctl_ep.client, client);
+
+	spin_unlock(&client_lock);
+
+	return client;
+}
+EXPORT_SYMBOL(rmnet_ctl_register_client);
+
+int rmnet_ctl_unregister_client(void *handle)
+{
+	struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
+
+	spin_lock(&client_lock);
+
+	if (rcu_dereference(ctl_ep.client) != client) {
+		spin_unlock(&client_lock);
+		return -EINVAL;
+	}
+
+	RCU_INIT_POINTER(ctl_ep.client, NULL);
+
+	spin_unlock(&client_lock);
+
+	synchronize_rcu();
+	kfree(client);
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_ctl_unregister_client);
+
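+/* Transmit a control command for the client. Returns the transport
+ * xmit result, or -EINVAL if the handle or device is not valid.
+ */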
+int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
+{
+	struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
+	struct rmnet_ctl_dev *dev;
+	int rc = -EINVAL;
+
+	if (client != rcu_dereference(ctl_ep.client))
+		return rc;
+
+	rcu_read_lock();
+
+	dev = rcu_dereference(ctl_ep.dev);
+	if (dev && dev->xmit)
+		rc = dev->xmit(dev, skb);
+
+	rcu_read_unlock();
+
+	return rc;
+}
+EXPORT_SYMBOL(rmnet_ctl_send_client);
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h
new file mode 100644
index 0000000..6362581
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL client handlers
+ *
+ */
+
+#ifndef _RMNET_CTL_CLIENT_H_
+#define _RMNET_CTL_CLIENT_H_
+
+#include <linux/skbuff.h>
+
+struct rmnet_ctl_stats {
+	u64 rx_pkts;
+	u64 rx_err;
+	u64 tx_pkts;
+	u64 tx_err;
+	u64 tx_complete;
+};
+
+struct rmnet_ctl_dev {
+	int (*xmit)(struct rmnet_ctl_dev *dev, struct sk_buff *skb);
+	struct rmnet_ctl_stats stats;
+};
+
+void rmnet_ctl_endpoint_post(const void *data, size_t len);
+void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev);
+
+#endif /* _RMNET_CTL_CLIENT_H_ */
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
new file mode 100644
index 0000000..af84e13
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL mhi handler
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <linux/mhi.h>
+#include "rmnet_ctl_client.h"
+
+#define RMNET_CTL_DEFAULT_MRU 1024
+
+struct rmnet_ctl_mhi_dev {
+	struct mhi_device *mhi_dev;
+	struct rmnet_ctl_dev dev;
+	u32 mru;
+	spinlock_t rx_lock; /* rx lock */
+	spinlock_t tx_lock; /* tx lock */
+	atomic_t in_reset;
+};
+
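+/* Queue an uplink command on the MHI channel. The skb is freed in the
+ * UL completion callback once the transfer is done.
+ */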
+static int rmnet_ctl_send_mhi(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = container_of(
+				dev, struct rmnet_ctl_mhi_dev, dev);
+	int rc;
+
+	spin_lock_bh(&ctl_dev->tx_lock);
+
+	rc = mhi_queue_transfer(ctl_dev->mhi_dev,
+				DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+	if (rc)
+		dev->stats.tx_err++;
+	else
+		dev->stats.tx_pkts++;
+
+	spin_unlock_bh(&ctl_dev->tx_lock);
+
+	return rc;
+}
+
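+/* Fill all free DL descriptors with MRU-sized buffers. If free_buf is
+ * given, it is recycled for the first descriptor.
+ */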
+static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
+				    gfp_t gfp, void *free_buf)
+{
+	struct mhi_device *mhi_dev = ctl_dev->mhi_dev;
+	void *buf;
+	int no_tre, i, rc;
+
+	no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+	for (i = 0; i < no_tre; i++) {
+		if (free_buf) {
+			buf = free_buf;
+			free_buf = NULL;
+		} else {
+			buf = kmalloc(ctl_dev->mru, gfp);
+		}
+
+		if (!buf)
+			return;
+
+		spin_lock_bh(&ctl_dev->rx_lock);
+		rc = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
+					buf, ctl_dev->mru, MHI_EOT);
+		spin_unlock_bh(&ctl_dev->rx_lock);
+
+		if (rc) {
+			kfree(buf);
+			return;
+		}
+	}
+}
+
+static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
+				  struct mhi_result *mhi_res)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+
+	if (mhi_res->transaction_status || !mhi_res->buf_addr) {
+		ctl_dev->dev.stats.rx_err++;
+	} else {
+		ctl_dev->dev.stats.rx_pkts++;
+		rmnet_ctl_endpoint_post(mhi_res->buf_addr,
+					mhi_res->bytes_xferd);
+	}
+
+	/* Re-supply receive buffers */
+	rmnet_ctl_alloc_buffers(ctl_dev, GFP_ATOMIC, mhi_res->buf_addr);
+}
+
+static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
+				  struct mhi_result *mhi_res)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+	struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;
+
+	if (skb) {
+		ctl_dev->dev.stats.tx_complete++;
+		kfree_skb(skb);
+	}
+}
+
+static void rmnet_ctl_status_callback(struct mhi_device *mhi_dev,
+				      enum MHI_CB mhi_cb)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+
+	if (mhi_cb != MHI_CB_FATAL_ERROR)
+		return;
+
+	atomic_inc(&ctl_dev->in_reset);
+}
+
+static int rmnet_ctl_probe(struct mhi_device *mhi_dev,
+			   const struct mhi_device_id *id)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev;
+	struct device_node *of_node = mhi_dev->dev.of_node;
+	int rc;
+
+	ctl_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*ctl_dev), GFP_KERNEL);
+	if (!ctl_dev)
+		return -ENOMEM;
+
+	ctl_dev->mhi_dev = mhi_dev;
+	ctl_dev->dev.xmit = rmnet_ctl_send_mhi;
+
+	spin_lock_init(&ctl_dev->rx_lock);
+	spin_lock_init(&ctl_dev->tx_lock);
+	atomic_set(&ctl_dev->in_reset, 0);
+	dev_set_drvdata(&mhi_dev->dev, ctl_dev);
+
+	rc = of_property_read_u32(of_node, "mhi,mru", &ctl_dev->mru);
+	if (rc || !ctl_dev->mru)
+		ctl_dev->mru = RMNET_CTL_DEFAULT_MRU;
+
+	rc = mhi_prepare_for_transfer(mhi_dev);
+	if (rc) {
+		pr_err("%s(): Failed to prep for transfer %d\n", __func__, rc);
+		return -EINVAL;
+	}
+
+	/* Post receive buffers */
+	rmnet_ctl_alloc_buffers(ctl_dev, GFP_KERNEL, NULL);
+
+	rmnet_ctl_endpoint_setdev(&ctl_dev->dev);
+
+	pr_info("rmnet_ctl driver probed\n");
+
+	return 0;
+}
+
+static void rmnet_ctl_remove(struct mhi_device *mhi_dev)
+{
+	rmnet_ctl_endpoint_setdev(NULL);
+	synchronize_rcu();
+	dev_set_drvdata(&mhi_dev->dev, NULL);
+
+	pr_info("rmnet_ctl driver removed\n");
+}
+
+static const struct mhi_device_id rmnet_ctl_mhi_match[] = {
+	{ .chan = "RMNET_CTL" },
+	{}
+};
+
+static struct mhi_driver rmnet_ctl_driver = {
+	.probe = rmnet_ctl_probe,
+	.remove = rmnet_ctl_remove,
+	.dl_xfer_cb = rmnet_ctl_dl_callback,
+	.ul_xfer_cb = rmnet_ctl_ul_callback,
+	.status_cb = rmnet_ctl_status_callback,
+	.id_table = rmnet_ctl_mhi_match,
+	.driver = {
+		.name = "rmnet_ctl",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_driver(rmnet_ctl_driver,
+	      mhi_driver_register, mhi_driver_unregister);
+
+MODULE_DESCRIPTION("RmNet Control Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 1ab2680..84ab642 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -14,6 +14,9 @@
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace_secure_buffer.h"
+
 DEFINE_MUTEX(secure_buffer_mutex);
 
 struct cp2_mem_chunks {
@@ -28,24 +31,12 @@ struct cp2_lock_req {
 	u32 lock;
 } __attribute__ ((__packed__));
 
-struct mem_prot_info {
-	phys_addr_t addr;
-	u64 size;
-};
-
 #define MEM_PROT_ASSIGN_ID		0x16
 #define MEM_PROTECT_LOCK_ID2		0x0A
 #define MEM_PROTECT_LOCK_ID2_FLAT	0x11
 #define V2_CHUNK_SIZE           SZ_1M
 #define FEATURE_ID_CP 12
 
-struct dest_vm_and_perm_info {
-	u32 vm;
-	u32 perm;
-	u64 ctx;
-	u32 ctx_size;
-};
-
 #define BATCH_MAX_SIZE SZ_2M
 #define BATCH_MAX_SECTIONS 32
 
@@ -228,9 +219,13 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 	unsigned int entries_size;
 	unsigned int batch_start = 0;
 	unsigned int batches_processed;
+	unsigned int i = 0;
+	u64 total_delta;
 	struct scatterlist *curr_sgl = table->sgl;
 	struct scatterlist *next_sgl;
 	int ret = 0;
+	ktime_t batch_assign_start_ts;
+	ktime_t first_assign_ts;
 	struct mem_prot_info *sg_table_copy = kcalloc(BATCH_MAX_SECTIONS,
 						      sizeof(*sg_table_copy),
 						      GFP_KERNEL);
@@ -238,6 +233,7 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 	if (!sg_table_copy)
 		return -ENOMEM;
 
+	first_assign_ts = ktime_get();
 	while (batch_start < table->nents) {
 		batches_processed = get_batches_from_sgl(sg_table_copy,
 							 curr_sgl, &next_sgl);
@@ -248,8 +244,13 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 		desc->args[0] = virt_to_phys(sg_table_copy);
 		desc->args[1] = entries_size;
 
+		trace_hyp_assign_batch_start(sg_table_copy, batches_processed);
+		batch_assign_start_ts = ktime_get();
 		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
 				MEM_PROT_ASSIGN_ID), desc);
+		trace_hyp_assign_batch_end(ret, ktime_us_delta(ktime_get(),
+					   batch_assign_start_ts));
+		i++;
 		if (ret) {
 			pr_info("%s: Failed to assign memory protection, ret = %d\n",
 				__func__, ret);
@@ -263,7 +264,8 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 
 		batch_start += batches_processed;
 	}
-
+	total_delta = ktime_us_delta(ktime_get(), first_assign_ts);
+	trace_hyp_assign_end(total_delta, total_delta / i);
 	kfree(sg_table_copy);
 	return ret;
 }
@@ -288,7 +290,7 @@ static int __hyp_assign_table(struct sg_table *table,
 	size_t dest_vm_copy_size;
 
 	if (!table || !table->sgl || !source_vm_list || !source_nelems ||
-	    !dest_vmids || !dest_perms || !dest_nelems)
+	    !dest_vmids || !dest_perms || !dest_nelems || !table->nents)
 		return -EINVAL;
 
 	/*
@@ -333,6 +335,8 @@ static int __hyp_assign_table(struct sg_table *table,
 	dmac_flush_range(dest_vm_copy,
 			 (void *)dest_vm_copy + dest_vm_copy_size);
 
+	trace_hyp_assign_info(source_vm_list, source_nelems, dest_vmids,
+			      dest_perms, dest_nelems);
 	ret = batched_hyp_assign(table, &desc);
 
 	mutex_unlock(&secure_buffer_mutex);
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index 0a17c1e..f735395 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -850,8 +850,10 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 
 	cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
 	if (!cb_req) {
-		ret =  OBJECT_ERROR_KMEM;
-		goto out;
+		/* we need to return error to caller so fill up result */
+		cb_req = buf;
+		cb_req->result = OBJECT_ERROR_KMEM;
+		return;
 	}
 
 	/* check whether it is to be served by kernel or userspace */
@@ -909,9 +911,11 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 			release_tzhandle_locked(cb_req->hdr.tzhandle);
 		}
 	}
-	hash_del(&cb_txn->hash);
-	memcpy(buf, cb_req, buf_len);
-	kref_put(&cb_txn->ref_cnt, delete_cb_txn);
+	if (cb_txn) {
+		hash_del(&cb_txn->hash);
+		memcpy(buf, cb_req, buf_len);
+		kref_put(&cb_txn->ref_cnt, delete_cb_txn);
+	}
 	mutex_unlock(&g_smcinvoke_lock);
 }
 
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index c2b48dd..8d37358 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -1439,6 +1439,7 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
 	 */
 	if (ch->pid == pid) {
 		pr_err("client is already registered with channel[%s]\n", name);
+		mutex_unlock(&ch->lock);
 		return -EINVAL;
 	}
 
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
index 9472d4f..910991b 100644
--- a/drivers/soc/qcom/spss_utils.c
+++ b/drivers/soc/qcom/spss_utils.c
@@ -63,15 +63,21 @@ static u32 spss_emul_type_reg_addr; /* TCSR_SOC_EMULATION_TYPE */
 static void *iar_notif_handle;
 static struct notifier_block *iar_nb;
 
-#define CMAC_SIZE_IN_BYTES (128/8) /* 128 bit */
+#define CMAC_SIZE_IN_BYTES (128/8) /* 128 bit = 16 bytes */
+#define CMAC_SIZE_IN_DWORDS (CMAC_SIZE_IN_BYTES/sizeof(u32)) /* 4 dwords */
+
+/* Asym , Crypt , Keym */
+#define NUM_UEFI_APPS 3
 
 static u32 pil_addr;
 static u32 pil_size;
-static u32 cmac_buf[CMAC_SIZE_IN_BYTES/sizeof(u32)]; /* saved cmac */
-static u32 pbl_cmac_buf[CMAC_SIZE_IN_BYTES/sizeof(u32)]; /* pbl cmac */
+static u32 cmac_buf[CMAC_SIZE_IN_DWORDS]; /* saved cmac */
+static u32 pbl_cmac_buf[CMAC_SIZE_IN_DWORDS]; /* pbl cmac */
+
+static u32 apps_cmac_buf[NUM_UEFI_APPS][CMAC_SIZE_IN_DWORDS];
+
 static u32 iar_state;
 static bool is_iar_enabled;
-static bool is_pbl_ce; /* Did SPU PBL performed Cryptographic Erase (CE) */
 
 static void __iomem *cmac_mem;
 static size_t cmac_mem_size = SZ_4K; /* XPU align to 4KB */
@@ -97,7 +103,7 @@ static struct spss_utils_device *spss_utils_dev;
 
 /* static functions declaration */
 static int spss_set_fw_cmac(u32 *cmac, size_t cmac_size);
-static int spss_get_pbl_calc_cmac(u32 *cmac, size_t cmac_size);
+static int spss_get_pbl_and_apps_calc_cmac(void);
 
 /*==========================================================================*/
 /*		Device Sysfs */
@@ -200,7 +206,7 @@ static ssize_t cmac_buf_show(struct device *dev,
 		return -EINVAL;
 	}
 
-	ret = snprintf(buf, PAGE_SIZE, "0x%x,0x%x,0x%x,0x%x\n",
+	ret = snprintf(buf, PAGE_SIZE, "0x%08x,0x%08x,0x%08x,0x%08x\n",
 		cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
 
 	return ret;
@@ -245,24 +251,6 @@ static ssize_t iar_enabled_show(struct device *dev,
 
 static DEVICE_ATTR_RO(iar_enabled);
 
-static ssize_t pbl_ce_show(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
-{
-	int ret = 0;
-
-	if (!dev || !attr || !buf) {
-		pr_err("invalid param.\n");
-		return -EINVAL;
-	}
-
-	ret = snprintf(buf, PAGE_SIZE, "0x%x\n", is_pbl_ce);
-
-	return ret;
-}
-
-static DEVICE_ATTR_RO(pbl_ce);
-
 static ssize_t pbl_cmac_show(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
@@ -274,7 +262,7 @@ static ssize_t pbl_cmac_show(struct device *dev,
 		return -EINVAL;
 	}
 
-	ret = snprintf(buf, PAGE_SIZE, "0x%x,0x%x,0x%x,0x%x\n",
+	ret = snprintf(buf, PAGE_SIZE, "0x%08x,0x%08x,0x%08x,0x%08x\n",
 	    pbl_cmac_buf[0], pbl_cmac_buf[1], pbl_cmac_buf[2], pbl_cmac_buf[3]);
 
 	return ret;
@@ -282,6 +270,21 @@ static ssize_t pbl_cmac_show(struct device *dev,
 
 static DEVICE_ATTR_RO(pbl_cmac);
 
+static ssize_t apps_cmac_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	memcpy(buf, apps_cmac_buf, sizeof(apps_cmac_buf));
+
+	return sizeof(apps_cmac_buf);
+}
+
+static DEVICE_ATTR_RO(apps_cmac);
+
 /*--------------------------------------------------------------------------*/
 static int spss_create_sysfs(struct device *dev)
 {
@@ -323,22 +326,23 @@ static int spss_create_sysfs(struct device *dev)
 		goto remove_iar_state;
 	}
 
-	ret = device_create_file(dev, &dev_attr_pbl_ce);
-	if (ret < 0) {
-		pr_err("failed to create sysfs file for pbl_ce.\n");
-		goto remove_iar_enabled;
-	}
-
 	ret = device_create_file(dev, &dev_attr_pbl_cmac);
 	if (ret < 0) {
 		pr_err("failed to create sysfs file for pbl_cmac.\n");
-		goto remove_pbl_ce;
+		goto remove_iar_enabled;
 	}
 
+	ret = device_create_file(dev, &dev_attr_apps_cmac);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for apps_cmac.\n");
+		goto remove_pbl_cmac;
+	}
+
 	return 0;
 
-remove_pbl_ce:
-		device_remove_file(dev, &dev_attr_pbl_ce);
+remove_pbl_cmac:
+		device_remove_file(dev, &dev_attr_pbl_cmac);
 remove_iar_enabled:
 		device_remove_file(dev, &dev_attr_iar_enabled);
 remove_iar_state:
@@ -393,22 +397,18 @@ static long spss_utils_ioctl(struct file *file,
 		}
 
 		memcpy(cmac_buf, data, sizeof(cmac_buf));
-		pr_info("cmac_buf: 0x%x,0x%x,0x%x,0x%x\n",
+		pr_info("saved fw cmac: 0x%08x,0x%08x,0x%08x,0x%08x\n",
 			cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
 
 		/*
 		 * SPSS is loaded now by UEFI,
 		 * so IAR callback is not being called on powerup by PIL.
-		 * therefore read the spu pbl fw cmac from ioctl.
+		 * therefore read the spu pbl fw cmac and apps cmac from ioctl.
 		 * The callback shall be called on spss SSR.
 		 */
-		pr_info("read pbl cmac from shared memory\n");
+		pr_debug("read pbl cmac from shared memory\n");
 		spss_set_fw_cmac(cmac_buf, sizeof(cmac_buf));
-		spss_get_pbl_calc_cmac(pbl_cmac_buf, sizeof(pbl_cmac_buf));
-		if (memcmp(cmac_buf, pbl_cmac_buf, sizeof(cmac_buf)) != 0)
-			is_pbl_ce = true; /* cmacs not the same */
-		else
-			is_pbl_ce = false;
+		spss_get_pbl_and_apps_calc_cmac();
 		break;
 
 	default:
@@ -675,11 +675,12 @@ static int spss_parse_dt(struct device_node *node)
 		return -EFAULT;
 	}
 
-	pr_info("pil_addr [0x%x].\n", pil_addr);
-	pr_info("pil_size [0x%x].\n", pil_size);
+	pr_debug("pil_addr [0x%08x].\n", pil_addr);
+	pr_debug("pil_size [0x%08x].\n", pil_size);
 
 	/* cmac buffer after spss firmware end */
 	cmac_mem_addr = pil_addr + pil_size;
+	pr_info("iar_buf_addr [0x%08x].\n", cmac_mem_addr);
 
 	ret = of_property_read_u32(node, "qcom,spss-fuse3-addr",
 		&spss_fuse3_addr);
@@ -705,7 +706,7 @@ static int spss_parse_dt(struct device_node *node)
 	/* read IAR_FEATURE_ENABLED from soc fuse */
 	val1 = readl_relaxed(spss_fuse3_reg);
 	spss_fuse3_mask = (1<<spss_fuse3_bit);
-	pr_info("iar_enabled fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
+	pr_debug("iar_enabled fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
 		spss_fuse3_addr, val1, spss_fuse3_mask);
 	if (val1 & spss_fuse3_mask)
 		is_iar_enabled = true;
@@ -737,7 +738,7 @@ static int spss_parse_dt(struct device_node *node)
 
 	val1 = readl_relaxed(spss_fuse4_reg);
 	spss_fuse4_mask = (0x07 << spss_fuse4_bit); /* 3 bits */
-	pr_info("IAR_STATE fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
+	pr_debug("IAR_STATE fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
 	spss_fuse4_addr, val1, spss_fuse4_mask);
 	val1 = ((val1 & spss_fuse4_mask) >> spss_fuse4_bit) & 0x07;
 
@@ -776,23 +777,43 @@ static int spss_set_fw_cmac(u32 *cmac, size_t cmac_size)
 	return 0;
 }
 
-static int spss_get_pbl_calc_cmac(u32 *cmac, size_t cmac_size)
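+/* Read the PBL-calculated firmware cmac and the per-app cmacs from the
+ * shared IAR buffer, skipping the saved cmac stored before each one.
+ */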
+static int spss_get_pbl_and_apps_calc_cmac(void)
 {
 	u8 __iomem *reg = NULL;
-	int i;
+	int i, j;
 	u32 val;
 
 	if (cmac_mem == NULL)
 		return -EFAULT;
 
-	/* PBL calculated cmac after HLOS expected cmac */
-	reg = cmac_mem + cmac_size;
+	reg = cmac_mem; /* IAR buffer base */
+	reg += CMAC_SIZE_IN_BYTES; /* skip the saved cmac */
 	pr_debug("reg [%pK]\n", reg);
 
-	for (i = 0; i < cmac_size/4; i++) {
-		val = readl_relaxed(reg + i*sizeof(u32));
-		cmac[i] = val;
-		pr_debug("cmac[%d] [0x%x]\n", (int) i, (int) val);
+	/* get pbl fw cmac from ddr */
+	for (i = 0; i < CMAC_SIZE_IN_DWORDS; i++) {
+		val = readl_relaxed(reg);
+		pbl_cmac_buf[i] = val;
+		reg += sizeof(u32);
+	}
+	reg += CMAC_SIZE_IN_BYTES; /* skip the saved cmac */
+
+	pr_info("pbl_cmac_buf : 0x%08x,0x%08x,0x%08x,0x%08x\n",
+	    pbl_cmac_buf[0], pbl_cmac_buf[1],
+	    pbl_cmac_buf[2], pbl_cmac_buf[3]);
+
+	/* get apps cmac from ddr */
+	for (j = 0; j < NUM_UEFI_APPS; j++) {
+		for (i = 0; i < CMAC_SIZE_IN_DWORDS; i++) {
+			val = readl_relaxed(reg);
+			apps_cmac_buf[j][i] = val;
+			reg += sizeof(u32);
+		}
+		reg += CMAC_SIZE_IN_BYTES; /* skip the saved cmac */
+
+		pr_info("app [%d] cmac : 0x%08x,0x%08x,0x%08x,0x%08x\n", j,
+			apps_cmac_buf[j][0], apps_cmac_buf[j][1],
+			apps_cmac_buf[j][2], apps_cmac_buf[j][3]);
 	}
 
 	return 0;
@@ -815,11 +836,7 @@ static int spss_utils_iar_callback(struct notifier_block *nb,
 		break;
 	case SUBSYS_AFTER_POWERUP:
 		pr_debug("[SUBSYS_AFTER_POWERUP] event.\n");
-		spss_get_pbl_calc_cmac(pbl_cmac_buf, sizeof(pbl_cmac_buf));
-		if (memcmp(cmac_buf, pbl_cmac_buf, sizeof(cmac_buf)) != 0)
-			is_pbl_ce = true; /* cmacs not the same */
-		else
-			is_pbl_ce = false;
+		spss_get_pbl_and_apps_calc_cmac();
 		break;
 	case SUBSYS_BEFORE_AUTH_AND_RESET:
 		pr_debug("[SUBSYS_BEFORE_AUTH_AND_RESET] event.\n");
diff --git a/drivers/soc/qcom/trace_secure_buffer.h b/drivers/soc/qcom/trace_secure_buffer.h
new file mode 100644
index 0000000..f0655c8
--- /dev/null
+++ b/drivers/soc/qcom/trace_secure_buffer.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM secure_buffer
+
+#if !defined(_TRACE_SECURE_BUFFER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SECURE_BUFFER_H
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <soc/qcom/secure_buffer.h>
+
+TRACE_EVENT(hyp_assign_info,
+
+	TP_PROTO(u32 *source_vm_list,
+		 int source_nelems, int *dest_vmids, int *dest_perms,
+		 int dest_nelems),
+
+	TP_ARGS(source_vm_list, source_nelems, dest_vmids,
+		dest_perms, dest_nelems),
+
+	TP_STRUCT__entry(
+		__field(int, source_nelems)
+		__field(int, dest_nelems)
+		__dynamic_array(u32, source_vm_list, source_nelems)
+		__dynamic_array(int, dest_vmids, dest_nelems)
+		__dynamic_array(int, dest_perms, dest_nelems)
+	),
+
+	TP_fast_assign(
+		__entry->source_nelems = source_nelems;
+		__entry->dest_nelems = dest_nelems;
+		memcpy(__get_dynamic_array(source_vm_list), source_vm_list,
+		       source_nelems * sizeof(*source_vm_list));
+		memcpy(__get_dynamic_array(dest_vmids), dest_vmids,
+		       dest_nelems * sizeof(*dest_vmids));
+		memcpy(__get_dynamic_array(dest_perms), dest_perms,
+		       dest_nelems * sizeof(*dest_perms));
+	),
+
+	TP_printk("srcVMIDs: %s dstVMIDs: %s dstPerms: %s",
+		  __print_array(__get_dynamic_array(source_vm_list),
+				__entry->source_nelems, sizeof(u32)),
+		  __print_array(__get_dynamic_array(dest_vmids),
+				__entry->dest_nelems, sizeof(int)),
+		  __print_array(__get_dynamic_array(dest_perms),
+				__entry->dest_nelems, sizeof(int))
+	)
+);
+
+TRACE_EVENT(hyp_assign_batch_start,
+
+	TP_PROTO(struct mem_prot_info *info, int info_nelems),
+
+	TP_ARGS(info, info_nelems),
+
+	TP_STRUCT__entry(
+		__field(int, info_nelems)
+		__field(u64, batch_size)
+		__dynamic_array(phys_addr_t, addrs, info_nelems)
+		__dynamic_array(u64, sizes, info_nelems)
+	),
+
+	TP_fast_assign(
+		unsigned int i;
+		phys_addr_t *addr_arr_ptr = __get_dynamic_array(addrs);
+		u64 *sizes_arr_ptr = __get_dynamic_array(sizes);
+
+		__entry->info_nelems = info_nelems;
+		__entry->batch_size = 0;
+
+		for (i = 0; i < info_nelems; i++) {
+			addr_arr_ptr[i] = info[i].addr;
+			sizes_arr_ptr[i] = info[i].size;
+			__entry->batch_size += info[i].size;
+		}
+	),
+
+	TP_printk("num entries: %d batch size: %llu phys addrs: %s sizes: %s",
+		  __entry->info_nelems, __entry->batch_size,
+		  __print_array(__get_dynamic_array(addrs),
+				__entry->info_nelems, sizeof(phys_addr_t)),
+		  __print_array(__get_dynamic_array(sizes),
+				__entry->info_nelems, sizeof(u64))
+	)
+);
+
+TRACE_EVENT(hyp_assign_batch_end,
+
+	TP_PROTO(int ret, u64 delta),
+
+	TP_ARGS(ret, delta),
+
+	TP_STRUCT__entry(
+		__field(int, ret)
+		__field(u64, delta)
+	),
+
+	TP_fast_assign(
+		__entry->ret = ret;
+		__entry->delta = delta;
+	),
+
+	TP_printk("ret: %d time delta: %lld us",
+		  __entry->ret, __entry->delta
+	)
+);
+
+TRACE_EVENT(hyp_assign_end,
+
+	TP_PROTO(u64 tot_delta, u64 avg_delta),
+
+	TP_ARGS(tot_delta, avg_delta),
+
+	TP_STRUCT__entry(
+		__field(u64, tot_delta)
+		__field(u64, avg_delta)
+	),
+
+	TP_fast_assign(
+		__entry->tot_delta = tot_delta;
+		__entry->avg_delta = avg_delta;
+	),
+
+	TP_printk("total time delta: %lld us avg batch delta: %lld us",
+		  __entry->tot_delta, __entry->avg_delta
+	)
+);
+#endif /* _TRACE_SECURE_BUFFER_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/soc/qcom/
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace_secure_buffer
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 1c85745..c7d45dc 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1185,6 +1185,28 @@ static const struct file_operations ion_fops = {
 #endif
 };
 
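+/* debugfs: dump a heap's statistics via its debug_show callback */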
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+	struct ion_heap *heap = s->private;
+
+	if (heap->debug_show)
+		heap->debug_show(heap, s, unused);
+
+	return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+	.open = ion_debug_heap_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 static int debug_shrink_set(void *data, u64 val)
 {
 	struct ion_heap *heap = data;
@@ -1222,6 +1244,7 @@ DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
+	char debug_name[64], buf[256];
 	int ret;
 
 	if (!heap->ops->allocate || !heap->ops->free)
@@ -1249,12 +1272,22 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
 
-	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
-		char debug_name[64];
+	if (heap->debug_show) {
+		snprintf(debug_name, 64, "%s_stats", heap->name);
+		if (!debugfs_create_file(debug_name, 0664, dev->debug_root,
+					 heap, &debug_heap_fops))
+			pr_err("Failed to create heap debugfs at %s/%s\n",
+			       dentry_path(dev->debug_root, buf, 256),
+			       debug_name);
+	}
 
+	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
 		snprintf(debug_name, 64, "%s_shrink", heap->name);
-		debugfs_create_file(debug_name, 0644, dev->debug_root,
-				    heap, &debug_shrink_fops);
+		if (!debugfs_create_file(debug_name, 0644, dev->debug_root,
+					 heap, &debug_shrink_fops))
+			pr_err("Failed to create heap debugfs at %s/%s\n",
+			       dentry_path(dev->debug_root, buf, 256),
+			       debug_name);
 	}
 
 	dev->heap_cnt++;
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index ed0898f..63e9218 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -49,6 +49,8 @@ static bool pool_refill_ok(struct ion_page_pool *pool)
 
 static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
+	if (fatal_signal_pending(current))
+		return NULL;
 	return alloc_pages(pool->gfp_mask, pool->order);
 }
 
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee..caf4d4d 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 			    unsigned int flags)
 {
-	int divider, base, prescale;
+	unsigned int divider, base, prescale;
 
-	/* This function needs improvment */
+	/* This function needs improvement */
 	/* Don't know if divider==0 works. */
 
 	for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 			divider = (*nanosec) / base;
 			break;
 		case CMDF_ROUND_UP:
-			divider = (*nanosec) / base;
+			divider = DIV_ROUND_UP(*nanosec, base);
 			break;
 		}
 		if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 	}
 
 	prescale = 15;
-	base = timer_base * (1 << prescale);
+	base = timer_base * (prescale + 1);
 	divider = 65535;
 	*nanosec = divider * base;
 	return (prescale << 16) | (divider);
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index c747e9c..0cef1d6 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -538,7 +538,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
 		break;
 	case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
 		ret = scnprintf(buf, PAGE_SIZE, "%u\n",
-				gasket_page_table_num_entries(
+				gasket_page_table_num_simple_entries(
 					gasket_dev->page_table[0]));
 		break;
 	case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 5449287..f0415de 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -25,6 +25,13 @@ static int tsens_get_temp(void *data, int *temp)
 	return tmdev->ops->get_temp(s, temp);
 }
 
+static int tsens_get_min_temp(void *data, int *temp)
+{
+	struct tsens_sensor *s = data;
+
+	return tsens_2xxx_get_min_temp(s, temp);
+}
+
 static int tsens_set_trip_temp(void *data, int low_temp, int high_temp)
 {
 	struct tsens_sensor *s = data;
@@ -82,6 +89,9 @@ static const struct of_device_id tsens_table[] = {
 	{	.compatible = "qcom,tsens24xx",
 		.data = &data_tsens24xx,
 	},
+	{	.compatible = "qcom,tsens26xx",
+		.data = &data_tsens26xx,
+	},
 	{	.compatible = "qcom,msm8937-tsens",
 		.data = &data_tsens14xx,
 	},
@@ -97,6 +107,10 @@ static struct thermal_zone_of_device_ops tsens_tm_thermal_zone_ops = {
 	.set_trips = tsens_set_trip_temp,
 };
 
+static struct thermal_zone_of_device_ops tsens_tm_min_thermal_zone_ops = {
+	.get_temp = tsens_get_min_temp,
+};
+
 static int get_device_tree_data(struct platform_device *pdev,
 				struct tsens_device *tmdev)
 {
@@ -105,6 +119,7 @@ static int get_device_tree_data(struct platform_device *pdev,
 	const struct tsens_data *data;
 	int rc = 0;
 	struct resource *res_tsens_mem;
+	u32 min_temp_id;
 
 	if (!of_match_node(tsens_table, of_node)) {
 		pr_err("Need to read SoC specific fuse map\n");
@@ -179,6 +194,11 @@ static int get_device_tree_data(struct platform_device *pdev,
 		}
 	}
 
+	if (!of_property_read_u32(of_node, "0C-sensor-num", &min_temp_id))
+		tmdev->min_temp_sensor_id = (int)min_temp_id;
+	else
+		tmdev->min_temp_sensor_id = MIN_TEMP_DEF_OFFSET;
+
 	return rc;
 }
 
@@ -209,6 +229,17 @@ static int tsens_thermal_zone_register(struct tsens_device *tmdev)
 		return -ENODEV;
 	}
 
+	if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET) {
+		tmdev->min_temp.tmdev = tmdev;
+		tmdev->min_temp.hw_id = tmdev->min_temp_sensor_id;
+		tmdev->min_temp.tzd =
+			devm_thermal_zone_of_sensor_register(
+			&tmdev->pdev->dev, tmdev->min_temp_sensor_id,
+			&tmdev->min_temp, &tsens_tm_min_thermal_zone_ops);
+		if (IS_ERR(tmdev->min_temp.tzd))
+			pr_err("Error registering min temp sensor\n");
+	}
+
 	/* Register virtual thermal sensors. */
 	qti_virtual_sensor_register(&tmdev->pdev->dev);
 
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index d7037a7..cb0e13f 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -898,6 +898,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
 {
 	struct cooling_dev_stats *stats = cdev->stats;
 
+	if (!stats)
+		return;
+
 	spin_lock(&stats->lock);
 
 	if (stats->state == new_state)
@@ -919,6 +922,9 @@ static ssize_t total_trans_show(struct device *dev,
 	struct cooling_dev_stats *stats = cdev->stats;
 	int ret;
 
+	if (!stats)
+		return -ENODEV;
+
 	spin_lock(&stats->lock);
 	ret = sprintf(buf, "%u\n", stats->total_trans);
 	spin_unlock(&stats->lock);
@@ -935,6 +941,9 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
 	ssize_t len = 0;
 	int i;
 
+	if (!stats)
+		return -ENODEV;
+
 	spin_lock(&stats->lock);
 	update_time_in_state(stats);
 
@@ -953,8 +962,12 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
 {
 	struct thermal_cooling_device *cdev = to_cooling_device(dev);
 	struct cooling_dev_stats *stats = cdev->stats;
-	int i, states = stats->max_states;
+	int i, states;
 
+	if (!stats)
+		return -ENODEV;
+
+	states = stats->max_states;
 	spin_lock(&stats->lock);
 
 	stats->total_trans = 0;
@@ -978,6 +991,9 @@ static ssize_t trans_table_show(struct device *dev,
 	ssize_t len = 0;
 	int i, j;
 
+	if (!stats)
+		return -ENODEV;
+
 	len += snprintf(buf + len, PAGE_SIZE - len, " From  :    To\n");
 	len += snprintf(buf + len, PAGE_SIZE - len, "       : ");
 	for (i = 0; i < stats->max_states; i++) {
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 8ee67c6..bf8768a 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -31,6 +31,7 @@
 #define SLOPE_DEFAULT		3200
 
 #define IPC_LOGPAGES 10
+#define MIN_TEMP_DEF_OFFSET		0xFF
 
 enum tsens_dbg_type {
 	TSENS_DBG_POLL,
@@ -208,14 +209,20 @@ struct tsens_device {
 	const struct tsens_data		*ctrl_data;
 	struct tsens_mtc_sysfs  mtcsys;
 	int				trdy_fail_ctr;
+	struct tsens_sensor		min_temp;
+	u8				min_temp_sensor_id;
 	struct tsens_sensor		sensor[0];
 };
 
-extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
+extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx,
+		data_tsens26xx;
 extern const struct tsens_data data_tsens14xx, data_tsens14xx_405;
 extern struct list_head tsens_device_list;
 
 extern int calibrate_8937(struct tsens_device *tmdev);
 extern int calibrate_405(struct tsens_device *tmdev);
 
+extern int tsens_2xxx_get_min_temp(
+		struct tsens_sensor *sensor, int *temp);
+
 #endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 062e53e..7d040ae 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -42,12 +42,15 @@
 #define TSENS_TM_UPPER_LOWER_INT_MASK(n)	((n) + 0x10)
 #define TSENS_TM_UPPER_INT_SET(n)		(1 << (n + 16))
 #define TSENS_TM_SN_CRITICAL_THRESHOLD_MASK	0xfff
+#define TSENS_TM_MIN_TEMP_VALID_BIT		BIT(16)
 #define TSENS_TM_SN_STATUS_VALID_BIT		BIT(21)
 #define TSENS_TM_SN_STATUS_CRITICAL_STATUS	BIT(19)
 #define TSENS_TM_SN_STATUS_UPPER_STATUS		BIT(18)
 #define TSENS_TM_SN_STATUS_LOWER_STATUS		BIT(17)
 #define TSENS_TM_SN_LAST_TEMP_MASK		0xfff
 #define TSENS_TM_CODE_BIT_MASK			0xfff
+#define TSENS_TM_0C_THR_MASK			0xfff
+#define TSENS_TM_0C_THR_OFFSET			12
 #define TSENS_TM_CODE_SIGN_BIT			0x800
 #define TSENS_TM_SCALE_DECI_MILLIDEG		100
 #define TSENS_DEBUG_WDOG_TRIGGER_COUNT		5
@@ -58,6 +61,10 @@
 #define TSENS_TM_TRDY(n)			((n) + 0xe4)
 #define TSENS_TM_TRDY_FIRST_ROUND_COMPLETE	BIT(3)
 #define TSENS_TM_TRDY_FIRST_ROUND_COMPLETE_SHIFT	3
+#define TSENS_TM_0C_INT_STATUS(n)	((n) + 0xe0)
+#define TSENS_TM_MIN_TEMP(n)	((n) + 0xec)
+#define TSENS_TM_0C_THRESHOLDS(n)		((n) + 0x1c)
+#define TSENS_MAX_READ_FAIL			50
 
 static void msm_tsens_convert_temp(int last_temp, int *temp)
 {
@@ -92,7 +99,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 			code, tmdev->trdy_fail_ctr);
 		tmdev->trdy_fail_ctr++;
 
-		if (tmdev->trdy_fail_ctr >= 50) {
+		if (tmdev->trdy_fail_ctr >= TSENS_MAX_READ_FAIL) {
 			if (tmdev->ops->dbg)
 				tmdev->ops->dbg(tmdev, 0,
 					TSENS_DBG_LOG_BUS_ID_DATA, NULL);
@@ -147,6 +154,75 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 	return 0;
 }
 
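+/* Read the controller-computed minimum temperature. The register is
+ * sampled up to three times until the valid bit is set; failing that,
+ * two matching back-to-back readings are accepted.
+ */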
+int tsens_2xxx_get_min_temp(struct tsens_sensor *sensor, int *temp)
+{
+	struct tsens_device *tmdev = NULL;
+	unsigned int code;
+	void __iomem *sensor_addr, *trdy;
+	int last_temp = 0, last_temp2 = 0, last_temp3 = 0, valid_bit;
+
+	if (!sensor)
+		return -EINVAL;
+
+	tmdev = sensor->tmdev;
+	trdy = TSENS_TM_TRDY(tmdev->tsens_tm_addr);
+
+	valid_bit = TSENS_TM_MIN_TEMP_VALID_BIT;
+	sensor_addr = TSENS_TM_MIN_TEMP(tmdev->tsens_tm_addr);
+
+	code = readl_relaxed_no_log(trdy);
+	if (!((code & TSENS_TM_TRDY_FIRST_ROUND_COMPLETE) >>
+			TSENS_TM_TRDY_FIRST_ROUND_COMPLETE_SHIFT)) {
+		pr_err("tsens device first round not complete 0x%x, ctr is %d\n",
+			code, tmdev->trdy_fail_ctr);
+		tmdev->trdy_fail_ctr++;
+		if (tmdev->trdy_fail_ctr >= TSENS_MAX_READ_FAIL) {
+			if (tmdev->ops->dbg)
+				tmdev->ops->dbg(tmdev, 0,
+					TSENS_DBG_LOG_BUS_ID_DATA, NULL);
+			BUG();
+		}
+		return -ENODATA;
+	}
+
+	tmdev->trdy_fail_ctr = 0;
+
+	code = readl_relaxed_no_log(sensor_addr);
+	last_temp = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & valid_bit) {
+		msm_tsens_convert_temp(last_temp, temp);
+		goto dbg;
+	}
+
+	code = readl_relaxed_no_log(sensor_addr);
+	last_temp2 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & valid_bit) {
+		last_temp = last_temp2;
+		msm_tsens_convert_temp(last_temp, temp);
+		goto dbg;
+	}
+
+	code = readl_relaxed_no_log(sensor_addr);
+	last_temp3 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & valid_bit) {
+		last_temp = last_temp3;
+		msm_tsens_convert_temp(last_temp, temp);
+		goto dbg;
+	}
+
+	if (last_temp == last_temp2)
+		last_temp = last_temp2;
+	else if (last_temp2 == last_temp3)
+		last_temp = last_temp3;
+
+	msm_tsens_convert_temp(last_temp, temp);
+
+dbg:
+	TSENS_DBG(tmdev, "Min temp: %d\n", *temp);
+
+	return 0;
+}
+
 static int tsens_tm_activate_trip_type(struct tsens_sensor *tm_sensor,
 			int trip, enum thermal_device_mode mode)
 {
@@ -518,6 +594,31 @@ static irqreturn_t tsens_tm_irq_thread(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
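+/* 0C (min temp) interrupt: report the set threshold while the status
+ * bit is raised, and the reset threshold once it clears.
+ */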
+static irqreturn_t tsens_tm_0C_irq_thread(int irq, void *data)
+{
+	struct tsens_device *tm = data;
+	int status, thrs, set_thr, reset_thr;
+	void __iomem *srot_addr, *addr;
+
+	addr = TSENS_TM_0C_INT_STATUS(tm->tsens_tm_addr);
+	status = readl_relaxed(addr);
+
+	srot_addr = TSENS_CTRL_ADDR(tm->tsens_srot_addr);
+	thrs = readl_relaxed(TSENS_TM_0C_THRESHOLDS(srot_addr));
+
+	msm_tsens_convert_temp(thrs & TSENS_TM_0C_THR_MASK, &reset_thr);
+	msm_tsens_convert_temp(
+		((thrs >> TSENS_TM_0C_THR_OFFSET) &
+				TSENS_TM_0C_THR_MASK), &set_thr);
+
+	if (status)
+		of_thermal_handle_trip_temp(tm->min_temp.tzd, set_thr);
+	else
+		of_thermal_handle_trip_temp(tm->min_temp.tzd, reset_thr);
+
+	return IRQ_HANDLED;
+}
+
 static int tsens2xxx_hw_sensor_en(struct tsens_device *tmdev,
 					u32 sensor_id)
 {
@@ -602,19 +703,26 @@ static int tsens2xxx_hw_init(struct tsens_device *tmdev)
 static const struct tsens_irqs tsens2xxx_irqs[] = {
 	{ "tsens-upper-lower", tsens_tm_irq_thread},
 	{ "tsens-critical", tsens_tm_critical_irq_thread},
+	{ "tsens-0C", tsens_tm_0C_irq_thread},
 };
 
 static int tsens2xxx_register_interrupts(struct tsens_device *tmdev)
 {
 	struct platform_device *pdev;
-	int i, rc;
+	int i, rc, irq_no;
+	unsigned long irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
 
 	if (!tmdev)
 		return -EINVAL;
 
+	if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET)
+		irq_no = ARRAY_SIZE(tsens2xxx_irqs);
+	else
+		irq_no = ARRAY_SIZE(tsens2xxx_irqs) - 1;
+
 	pdev = tmdev->pdev;
 
-	for (i = 0; i < ARRAY_SIZE(tsens2xxx_irqs); i++) {
+	for (i = 0; i < irq_no; i++) {
 		int irq;
 
 		irq = platform_get_irq_byname(pdev, tsens2xxx_irqs[i].name);
@@ -624,10 +732,12 @@ static int tsens2xxx_register_interrupts(struct tsens_device *tmdev)
 			return irq;
 		}
 
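+		/* index 2 is the 0C (min-temp) interrupt, which is edge-triggered */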
+		if (i == 2)
+			irqflags = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
+
 		rc = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 				tsens2xxx_irqs[i].handler,
-				IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-				tsens2xxx_irqs[i].name, tmdev);
+				irqflags, tsens2xxx_irqs[i].name, tmdev);
 		if (rc) {
 			dev_err(&pdev->dev, "failed to get irq %s\n",
 					tsens2xxx_irqs[i].name);
@@ -677,3 +787,14 @@ const struct tsens_data data_tsens24xx = {
 	.ops				= &ops_tsens2xxx,
 	.mtc				= false,
 };
+
+const struct tsens_data data_tsens26xx = {
+	.cycle_monitor			= true,
+	.cycle_compltn_monitor_mask	= 1,
+	.wd_bark			= true,
+	.wd_bark_mask			= 0,
+	.ops				= &ops_tsens2xxx,
+	.mtc				= false,
+	.ver_major			= 2,
+	.ver_minor			= 6,
+};
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4d147f89..7c7217a 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -840,8 +840,13 @@ int tty_ldisc_init(struct tty_struct *tty)
  */
 void tty_ldisc_deinit(struct tty_struct *tty)
 {
-	if (tty->ldisc)
+	if (tty->ldisc) {
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
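+		/* flush any pending local-echo work before the ldisc goes away */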
+		if (tty->echo_delayed_work.work.func)
+			cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
 		tty_ldisc_put(tty->ldisc);
+	}
 	tty->ldisc = NULL;
 }
 
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index b989ca2..2f03729 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -116,8 +116,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
 
 	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
 		tsk = waiter->task;
-		smp_mb();
-		waiter->task = NULL;
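+		/* pairs with the smp_load_acquire() in down_read_failed() */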
+		smp_store_release(&waiter->task, NULL);
 		wake_up_process(tsk);
 		put_task_struct(tsk);
 	}
@@ -217,7 +216,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (!waiter.task)
+		if (!smp_load_acquire(&waiter.task))
 			break;
 		if (!timeout)
 			break;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5b442bc..59675cc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1333,10 +1333,6 @@ static int acm_probe(struct usb_interface *intf,
 	tty_port_init(&acm->port);
 	acm->port.ops = &acm_port_ops;
 
-	minor = acm_alloc_minor(acm);
-	if (minor < 0)
-		goto alloc_fail1;
-
 	ctrlsize = usb_endpoint_maxp(epctrl);
 	readsize = usb_endpoint_maxp(epread) *
 				(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1344,6 +1340,13 @@ static int acm_probe(struct usb_interface *intf,
 	acm->writesize = usb_endpoint_maxp(epwrite) * 20;
 	acm->control = control_interface;
 	acm->data = data_interface;
+
+	usb_get_intf(acm->control); /* undone in destruct() */
+
+	minor = acm_alloc_minor(acm);
+	if (minor < 0)
+		goto alloc_fail1;
+
 	acm->minor = minor;
 	acm->dev = usb_dev;
 	if (h.usb_cdc_acm_descriptor)
@@ -1490,7 +1493,6 @@ static int acm_probe(struct usb_interface *intf,
 	usb_driver_claim_interface(&acm_driver, data_interface, acm);
 	usb_set_intfdata(data_interface, acm);
 
-	usb_get_intf(control_interface);
 	tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
 			&control_interface->dev);
 	if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index ffccd40..29c6414 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1792,8 +1792,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 	return 0;
 
  error:
-	if (as && as->usbm)
-		dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
 	kfree(isopkt);
 	kfree(dr);
 	if (as)
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f7..558890a 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
 		intf->minor = minor;
 		break;
 	}
-	up_write(&minor_rwsem);
-	if (intf->minor < 0)
+	if (intf->minor < 0) {
+		up_write(&minor_rwsem);
 		return -EXFULL;
+	}
 
 	/* create a usb class device for this usb interface */
 	snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
 				      MKDEV(USB_MAJOR, minor), class_driver,
 				      "%s", kbasename(name));
 	if (IS_ERR(intf->usb_dev)) {
-		down_write(&minor_rwsem);
 		usb_minors[minor] = NULL;
 		intf->minor = -1;
-		up_write(&minor_rwsem);
 		retval = PTR_ERR(intf->usb_dev);
 	}
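+	/* keep minor_rwsem held across device creation so open() cannot race */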
+	up_write(&minor_rwsem);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
 		return;
 
 	dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+	device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 
 	down_write(&minor_rwsem);
 	usb_minors[intf->minor] = NULL;
 	up_write(&minor_rwsem);
 
-	device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 	intf->usb_dev = NULL;
 	intf->minor = -1;
 	destroy_usb_class();
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 4020ce8d..0d3fd20 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2211,14 +2211,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
 				(struct usb_cdc_dmm_desc *)buffer;
 			break;
 		case USB_CDC_MDLM_TYPE:
-			if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+			if (elength < sizeof(struct usb_cdc_mdlm_desc))
 				goto next_desc;
 			if (desc)
 				return -EINVAL;
 			desc = (struct usb_cdc_mdlm_desc *)buffer;
 			break;
 		case USB_CDC_MDLM_DETAIL_TYPE:
-			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
 				goto next_desc;
 			if (detail)
 				return -EINVAL;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 82c761d..1b0f981 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -145,6 +145,10 @@ void dwc3_en_sleep_mode(struct dwc3 *dwc)
 	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 	reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
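+	/* also enable the host-mode L1 suspend threshold along with sleep mode */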
+	reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+	reg |= DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST;
+	dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
 }
 
 void dwc3_dis_sleep_mode(struct dwc3 *dwc)
@@ -154,6 +158,10 @@ void dwc3_dis_sleep_mode(struct dwc3 *dwc)
 	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 	reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+	reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+	reg &= ~DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST;
+	dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
 }
 
 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c80f5d2..d2d3e16 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -258,6 +258,7 @@
 #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS	BIT(28)
 #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW	BIT(24)
 #define DWC3_GUCTL1_IP_GAP_ADD_ON(n)	(n << 21)
+#define DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST	BIT(8)
 
 /* Global Status Register */
 #define DWC3_GSTS_OTG_IP	BIT(10)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 413a44e..43dc062 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -225,8 +225,8 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep)
 			|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
 		mult = 3;
 
-	if ((dep->endpoint.maxburst > 2) &&
-			dep->endpoint.ep_type == EP_TYPE_GSI
+	if ((dep->endpoint.maxburst > 6) &&
+			usb_endpoint_xfer_bulk(dep->endpoint.desc)
 			&& dwc3_is_usb31(dwc))
 		mult = 6;
 
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index cdb6d62..c0d0449 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1339,7 +1339,6 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
 	unsigned long flags;
 	struct gsi_ctrl_pkt *cpkt;
 	struct gsi_ctrl_port *c_port;
-	struct usb_request *req;
 	enum ipa_usb_teth_prot prot_id =
 		*(enum ipa_usb_teth_prot *)(fp->private_data);
 	struct gsi_inst_status *inst_cur = &inst_status[prot_id];
@@ -1358,13 +1357,6 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
 
 	gsi = inst_cur->opts->gsi;
 	c_port = &gsi->c_port;
-	req = c_port->notify_req;
-
-	if (!c_port || !req || !req->buf) {
-		log_event_err("%s: c_port %pK req %p req->buf %p",
-			__func__, c_port, req, req ? req->buf : req);
-		return -ENODEV;
-	}
 
 	if (!count || count > GSI_MAX_CTRL_PKT_SIZE) {
 		log_event_err("error: ctrl pkt length %zu", count);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index 311e7ce..ae698bf 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -450,7 +450,7 @@ static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_in_comp_desc = {
 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
 	/* the following 2 values can be tweaked if necessary */
-	.bMaxBurst =		2,
+	.bMaxBurst =		6,
 	/* .bmAttributes =	0, */
 };
 
@@ -705,7 +705,7 @@ static struct usb_ss_ep_comp_descriptor rndis_gsi_ss_bulk_comp_desc = {
 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
 	/* the following 2 values can be tweaked if necessary */
-	.bMaxBurst =		2,
+	.bMaxBurst =		6,
 	/* .bmAttributes =	0, */
 };
 
@@ -990,7 +990,7 @@ static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_in_comp_desc = {
 	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 
 	/* the following 2 values can be tweaked if necessary */
-	.bMaxBurst =         2,
+	.bMaxBurst =         6,
 	/* .bmAttributes =      0, */
 };
 
@@ -1290,7 +1290,7 @@ static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_in_comp_desc = {
 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
 	/* the following 2 values can be tweaked if necessary */
-	.bMaxBurst =         2,
+	.bMaxBurst =         6,
 	/* .bmAttributes =      0, */
 };
 
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fea02c7..a5254e8 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/sys_soc.h>
 #include <linux/uaccess.h>
 #include <linux/usb/ch9.h>
@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
 	if (usb3->forced_b_device)
 		return -EBUSY;
 
-	if (!strncmp(buf, "host", strlen("host")))
+	if (sysfs_streq(buf, "host"))
 		new_mode_is_host = true;
-	else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+	else if (sysfs_streq(buf, "peripheral"))
 		new_mode_is_host = false;
 	else
 		return -EINVAL;
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 671bce1..8616c52 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
 	 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
 	 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
 	 * xhci_gen_setup().
+	 *
+	 * Also, since the firmware/internal CPU controls USBSTS.STS_HALT
+	 * and processing slows down when the roothub port enters U3, a
+	 * long delay for the STS_HALT handshake is needed in xhci_suspend().
 	 */
 	if (xhci_rcar_is_gen2(hcd->self.controller) ||
-			xhci_rcar_is_gen3(hcd->self.controller))
-		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+			xhci_rcar_is_gen3(hcd->self.controller)) {
+		xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
+	}
 
 	if (!xhci_rcar_wait_for_pll_active(hcd))
 		return -ETIMEDOUT;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 438d8ce..1d8c40f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -450,7 +450,7 @@ struct xhci_op_regs {
  * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
  * or not program values < '4' if BLC = '0' and a BESL device is attached.
  */
-#define XHCI_DEFAULT_BESL	4
+#define XHCI_DEFAULT_BESL	0
 
 /*
  * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c2991b8..55db0fc 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
 	dev = usb_get_intfdata(interface);
 	mutex_lock(&iowarrior_open_disc_lock);
 	usb_set_intfdata(interface, NULL);
+	/* prevent device read, write and ioctl */
+	dev->present = 0;
 
 	minor = dev->minor;
+	mutex_unlock(&iowarrior_open_disc_lock);
+	/* give back our minor; this calls close(), so locks must be dropped here */
 
-	/* give back our minor */
 	usb_deregister_dev(interface, &iowarrior_class);
 
 	mutex_lock(&dev->mutex);
 
 	/* prevent device read, write and ioctl */
-	dev->present = 0;
 
 	mutex_unlock(&dev->mutex);
-	mutex_unlock(&iowarrior_open_disc_lock);
 
 	if (dev->opened) {
 		/* There is a process that holds a filedescriptor to the device ,
diff --git a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
index 3c97e40..39745c1 100644
--- a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
+++ b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
@@ -445,8 +445,7 @@ static int ssusb_redriver_vbus_notifier(struct notifier_block *nb,
 
 	redriver->vbus_active = event;
 
-	if (redriver->vbus_active)
-		queue_work(redriver->redriver_wq, &redriver->config_work);
+	queue_work(redriver->redriver_wq, &redriver->config_work);
 
 	return NOTIFY_DONE;
 }
@@ -466,8 +465,7 @@ static int ssusb_redriver_id_notifier(struct notifier_block *nb,
 
 	redriver->host_active = host_active;
 
-	if (redriver->host_active)
-		queue_work(redriver->redriver_wq, &redriver->config_work);
+	queue_work(redriver->redriver_wq, &redriver->config_work);
 
 	return NOTIFY_DONE;
 }
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 7b306aa..6715a12 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
 
 	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
-	usb_put_dev(dev->udev);
 	if (dev->cntl_urb) {
 		usb_kill_urb(dev->cntl_urb);
 		kfree(dev->cntl_req);
@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
 				dev->int_buffer, dev->urb->transfer_dma);
 		usb_free_urb(dev->urb);
 	}
+	usb_put_dev(dev->udev);
 	kfree(dev);
 }
 
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index f3ff59d..4d52f1a 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -442,7 +442,6 @@ struct usbpd {
 	struct regulator	*vconn;
 	bool			vbus_enabled;
 	bool			vconn_enabled;
-	bool			vconn_is_external;
 
 	u8			tx_msgid[SOPII_MSG + 1];
 	u8			rx_msgid[SOPII_MSG + 1];
@@ -516,6 +515,21 @@ enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
 }
 EXPORT_SYMBOL(usbpd_get_plug_orientation);
 
+static int get_connector_type(struct usbpd *pd)
+{
+	int ret;
+	union power_supply_propval val;
+
+	ret = power_supply_get_property(pd->usb_psy,
+		POWER_SUPPLY_PROP_CONNECTOR_TYPE, &val);
+	if (ret) {
+		dev_err(&pd->dev, "Unable to read CONNECTOR TYPE: %d\n", ret);
+		return ret;
+	}
+
+	return val.intval;
+}
+
 static inline void stop_usb_host(struct usbpd *pd)
 {
 	extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 0);
@@ -831,11 +845,6 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
 		return -ENOTSUPP;
 	}
 
-	/* Can't sink more than 5V if VCONN is sourced from the VBUS input */
-	if (pd->vconn_enabled && !pd->vconn_is_external &&
-			pd->requested_voltage > 5000000)
-		return -ENOTSUPP;
-
 	pd->requested_current = curr;
 	pd->requested_pdo = pdo_pos;
 
@@ -899,6 +908,7 @@ static void pd_send_hard_reset(struct usbpd *pd)
 	pd->hard_reset_count++;
 	pd_phy_signal(HARD_RESET_SIG);
 	pd->in_pr_swap = false;
+	pd->pd_connected = false;
 	power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
 }
 
@@ -2213,6 +2223,10 @@ static void handle_state_src_send_capabilities(struct usbpd *pd,
 	ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
 			ARRAY_SIZE(default_src_caps), SOP_MSG);
 	if (ret) {
+		if (pd->pd_connected) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
 		/*
 		 * Technically this is PE_SRC_Discovery, but we can
 		 * handle it by setting a timer to come back to the
@@ -2850,21 +2864,6 @@ static bool handle_ctrl_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
 		usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
 		break;
 	case MSG_VCONN_SWAP:
-		/*
-		 * if VCONN is connected to VBUS, make sure we are
-		 * not in high voltage contract, otherwise reject.
-		 */
-		if (!pd->vconn_is_external &&
-				(pd->requested_voltage > 5000000)) {
-			ret = pd_send_msg(pd, MSG_REJECT, NULL, 0,
-					SOP_MSG);
-			if (ret)
-				usbpd_set_state(pd,
-						PE_SEND_SOFT_RESET);
-
-			break;
-		}
-
 		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
 		if (ret) {
 			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
@@ -4673,9 +4672,6 @@ struct usbpd *usbpd_create(struct device *parent)
 	extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
 			EXTCON_PROP_USB_SS);
 
-	pd->vconn_is_external = device_property_present(parent,
-					"qcom,vconn-uses-external-source");
-
 	pd->num_sink_caps = device_property_read_u32_array(parent,
 			"qcom,default-sink-caps", NULL, 0);
 	if (pd->num_sink_caps > 0) {
@@ -4727,10 +4723,17 @@ struct usbpd *usbpd_create(struct device *parent)
 	pd->typec_caps.port_type_set = usbpd_typec_port_type_set;
 	pd->partner_desc.identity = &pd->partner_identity;
 
-	pd->typec_port = typec_register_port(parent, &pd->typec_caps);
-	if (IS_ERR(pd->typec_port)) {
-		usbpd_err(&pd->dev, "could not register typec port\n");
+	ret = get_connector_type(pd);
+	if (ret < 0)
 		goto put_psy;
+
+	/* For non-TypeC connector, it will be handled elsewhere */
+	if (ret != POWER_SUPPLY_CONNECTOR_MICRO_USB) {
+		pd->typec_port = typec_register_port(parent, &pd->typec_caps);
+		if (IS_ERR(pd->typec_port)) {
+			usbpd_err(&pd->dev, "could not register typec port\n");
+			goto put_psy;
+		}
 	}
 
 	pd->current_pr = PR_NONE;
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 55ba3db..f24b20f 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -25,5 +25,6 @@
 obj-$(CONFIG_USB_ULPI)			+= phy-ulpi.o
 obj-$(CONFIG_USB_ULPI_VIEWPORT)		+= phy-ulpi-viewport.o
 obj-$(CONFIG_KEYSTONE_USB_PHY)		+= phy-keystone.o
+obj-$(CONFIG_MSM_QUSB_PHY)		+= phy-msm-qusb.o
 obj-$(CONFIG_USB_MSM_SSPHY_QMP)     	+= phy-msm-ssusb-qmp.o
 obj-$(CONFIG_MSM_HSUSB_PHY)		+= phy-msm-snps-hs.o
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
new file mode 100644
index 0000000..93fc8fd
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -0,0 +1,1173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/usb/phy.h>
+#include <linux/reset.h>
+
+#define QUSB2PHY_PLL_STATUS	0x38
+#define QUSB2PHY_PLL_LOCK	BIT(5)
+
+#define QUSB2PHY_PORT_QC1	0x70
+#define VDM_SRC_EN		BIT(4)
+#define VDP_SRC_EN		BIT(2)
+
+#define QUSB2PHY_PORT_QC2	0x74
+#define RDM_UP_EN		BIT(1)
+#define RDP_UP_EN		BIT(3)
+#define RPUM_LOW_EN		BIT(4)
+#define RPUP_LOW_EN		BIT(5)
+
+#define QUSB2PHY_PORT_POWERDOWN		0xB4
+#define CLAMP_N_EN			BIT(5)
+#define FREEZIO_N			BIT(1)
+#define POWER_DOWN			BIT(0)
+
+#define QUSB2PHY_PORT_TEST_CTRL		0xB8
+
+#define QUSB2PHY_PWR_CTRL1		0x210
+#define PWR_CTRL1_CLAMP_N_EN		BIT(1)
+#define PWR_CTRL1_POWR_DOWN		BIT(0)
+
+#define QUSB2PHY_PLL_COMMON_STATUS_ONE	0x1A0
+#define CORE_READY_STATUS		BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL1	0xC0
+#define TERM_SELECT			BIT(4)
+#define XCVR_SELECT_FS			BIT(2)
+#define OP_MODE_NON_DRIVE		BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL2	0xC4
+#define UTMI_ULPI_SEL			BIT(7)
+#define UTMI_TEST_MUX_SEL		BIT(6)
+
+#define QUSB2PHY_PLL_TEST		0x04
+#define CLK_REF_SEL			BIT(7)
+
+#define QUSB2PHY_PORT_TUNE1             0x80
+#define QUSB2PHY_PORT_TUNE2             0x84
+#define QUSB2PHY_PORT_TUNE3             0x88
+#define QUSB2PHY_PORT_TUNE4             0x8C
+#define QUSB2PHY_PORT_TUNE5             0x90
+
+/* Get TUNE2's high nibble value read from efuse */
+#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	(((val) >> (pos)) & (mask))
+
+#define QUSB2PHY_PORT_INTR_CTRL         0xBC
+#define CHG_DET_INTR_EN                 BIT(4)
+#define DMSE_INTR_HIGH_SEL              BIT(3)
+#define DMSE_INTR_EN                    BIT(2)
+#define DPSE_INTR_HIGH_SEL              BIT(1)
+#define DPSE_INTR_EN                    BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_STATUS	0xF4
+#define LINESTATE_DP			BIT(0)
+#define LINESTATE_DM			BIT(1)
+
+#define QUSB2PHY_1P8_VOL_MIN           1800000 /* uV */
+#define QUSB2PHY_1P8_VOL_MAX           1800000 /* uV */
+#define QUSB2PHY_1P8_HPM_LOAD          30000   /* uA */
+
+#define QUSB2PHY_3P3_VOL_MIN		3075000 /* uV */
+#define QUSB2PHY_3P3_VOL_MAX		3200000 /* uV */
+#define QUSB2PHY_3P3_HPM_LOAD		30000	/* uA */
+
+#define QUSB2PHY_REFCLK_ENABLE		BIT(0)
+
+#define HSTX_TRIMSIZE			4
+
+struct qusb_phy {
+	struct usb_phy		phy;
+	void __iomem		*base;
+	void __iomem		*tune2_efuse_reg;
+	void __iomem		*ref_clk_base;
+	void __iomem		*tcsr_clamp_dig_n;
+	void __iomem		*tcsr_conn_box_spare;
+
+	struct clk		*ref_clk_src;
+	struct clk		*ref_clk;
+	struct clk		*cfg_ahb_clk;
+	struct reset_control	*phy_reset;
+	struct clk		*iface_clk;
+	struct clk		*core_clk;
+
+	struct regulator	*gdsc;
+	struct regulator	*vdd;
+	struct regulator	*vdda33;
+	struct regulator	*vdda18;
+	int			vdd_levels[3]; /* none, low, high */
+	int			init_seq_len;
+	int			*qusb_phy_init_seq;
+	u32			major_rev;
+	u32			usb_hs_ac_bitmask;
+	u32			usb_hs_ac_value;
+
+	u32			tune2_val;
+	int			tune2_efuse_bit_pos;
+	int			tune2_efuse_num_of_bits;
+	int			tune2_efuse_correction;
+
+	bool			cable_connected;
+	bool			suspended;
+	bool			ulpi_mode;
+	bool			dpdm_enable;
+	bool			is_se_clk;
+
+	struct regulator_desc	dpdm_rdesc;
+	struct regulator_dev	*dpdm_rdev;
+
+	bool			put_into_high_z_state;
+	struct mutex		phy_lock;
+
+	/* debugfs entries */
+	struct dentry		*root;
+	u8			tune1;
+	u8			tune2;
+	u8			tune3;
+	u8			tune4;
+	u8			tune5;
+};
+
+static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
+{
+	dev_dbg(qphy->phy.dev, "%s(): on:%d\n", __func__, on);
+
+	if (on) {
+		clk_prepare_enable(qphy->ref_clk_src);
+		clk_prepare_enable(qphy->ref_clk);
+		clk_prepare_enable(qphy->iface_clk);
+		clk_prepare_enable(qphy->core_clk);
+		clk_prepare_enable(qphy->cfg_ahb_clk);
+	} else {
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		/*
+		 * There is an FSM dependency between iface_clk and core_clk,
+		 * so turn off core_clk before iface_clk.
+		 */
+		clk_disable_unprepare(qphy->core_clk);
+		clk_disable_unprepare(qphy->iface_clk);
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+	}
+}
+
+static int qusb_phy_gdsc(struct qusb_phy *qphy, bool on)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qphy->gdsc))
+		return -EPERM;
+
+	if (on) {
+		dev_dbg(qphy->phy.dev, "TURNING ON GDSC\n");
+		ret = regulator_enable(qphy->gdsc);
+		if (ret) {
+			dev_err(qphy->phy.dev, "unable to enable gdsc\n");
+			return ret;
+		}
+	} else {
+		dev_dbg(qphy->phy.dev, "TURNING OFF GDSC\n");
+		ret = regulator_disable(qphy->gdsc);
+		if (ret) {
+			dev_err(qphy->phy.dev, "unable to disable gdsc\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
+{
+	int min, ret;
+
+	min = high ? 1 : 0; /* low or none? */
+	ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min],
+						qphy->vdd_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
+			qphy->vdd_levels[min], qphy->vdd_levels[2]);
+	return ret;
+}
+
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
+{
+	int ret = 0;
+
+	dev_dbg(qphy->phy.dev, "%s turn %s regulators\n",
+			__func__, on ? "on" : "off");
+
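+	/* the unwind labels below double as the ordered power-off path */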
+	if (!on)
+		goto disable_vdda33;
+
+	ret = qusb_phy_config_vdd(qphy, true);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+							ret);
+		goto err_vdd;
+	}
+
+	ret = regulator_enable(qphy->vdd);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+		goto unconfig_vdd;
+	}
+
+	ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
+		goto disable_vdd;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
+						QUSB2PHY_1P8_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda18:%d\n", ret);
+		goto put_vdda18_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda18);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
+		goto unset_vdda18;
+	}
+
+	ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
+		goto disable_vdda18;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN,
+						QUSB2PHY_3P3_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda33:%d\n", ret);
+		goto put_vdda33_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda33);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
+		goto unset_vdd33;
+	}
+
+	pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+	return ret;
+
+disable_vdda33:
+	ret = regulator_disable(qphy->vdda33);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
+
+unset_vdd33:
+	ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda33:%d\n", ret);
+
+put_vdda33_lpm:
+	ret = regulator_set_load(qphy->vdda33, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
+
+disable_vdda18:
+	ret = regulator_disable(qphy->vdda18);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
+
+unset_vdda18:
+	ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda18:%d\n", ret);
+
+put_vdda18_lpm:
+	ret = regulator_set_load(qphy->vdda18, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
+
+disable_vdd:
+	ret = regulator_disable(qphy->vdd);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+								ret);
+
+unconfig_vdd:
+	ret = qusb_phy_config_vdd(qphy, false);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+								ret);
+err_vdd:
+	dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+
+	return ret;
+}
+
+static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+{
+	u32 bit_mask = 1;
+	u8 reg_val;
+
+	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
+				qphy->tune2_efuse_num_of_bits,
+				qphy->tune2_efuse_bit_pos);
+
+	/* get bit mask based on number of bits to use with efuse reg */
+	bit_mask = (bit_mask << qphy->tune2_efuse_num_of_bits) - 1;
+
+	/*
+	 * Read EFUSE register having TUNE2 parameter's high nibble.
+	 * If efuse register shows value as 0x0, then use previous value
+	 * as it is. Otherwise use efuse register based value for this purpose.
+	 */
+	if (qphy->tune2_efuse_num_of_bits < HSTX_TRIMSIZE) {
+		qphy->tune2_val =
+		     TUNE2_HIGH_NIBBLE_VAL(readl_relaxed(qphy->tune2_efuse_reg),
+		     qphy->tune2_efuse_bit_pos, bit_mask);
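+		/* the remaining TRIM bits continue in the next efuse word */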
+		bit_mask =
+		     (1 << (HSTX_TRIMSIZE - qphy->tune2_efuse_num_of_bits)) - 1;
+		qphy->tune2_val |=
+		 TUNE2_HIGH_NIBBLE_VAL(readl_relaxed(qphy->tune2_efuse_reg + 4),
+				0, bit_mask) << qphy->tune2_efuse_num_of_bits;
+	} else {
+		qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
+		qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
+					qphy->tune2_efuse_bit_pos, bit_mask);
+	}
+
+	pr_debug("%s(): efuse based tune2 value:%d\n",
+				__func__, qphy->tune2_val);
+
+	/* Update higher nibble of TUNE2 value for better rise/fall times */
+	if (qphy->tune2_efuse_correction && qphy->tune2_val) {
+		if (qphy->tune2_efuse_correction > 5 ||
+				qphy->tune2_efuse_correction < -10)
+			pr_warn("Correction value is out of range : %d\n",
+					qphy->tune2_efuse_correction);
+		else
+			qphy->tune2_val = qphy->tune2_val +
+						qphy->tune2_efuse_correction;
+	}
+
+	reg_val = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE2);
+	if (qphy->tune2_val) {
+		reg_val  &= 0x0f;
+		reg_val |= (qphy->tune2_val << 4);
+	}
+
+	qphy->tune2_val = reg_val;
+}
+
+static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
+		unsigned long delay)
+{
+	int i;
+
+	pr_debug("Seq count:%d\n", cnt);
+	for (i = 0; i < cnt; i = i+2) {
+		pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]);
+		writel_relaxed(seq[i], base + seq[i+1]);
+		if (delay)
+			usleep_range(delay, (delay + 2000));
+	}
+}
+
+static int qusb_phy_init(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret, reset_val = 0;
+	u8 reg;
+	bool pll_lock_fail = false;
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	/*
+	 * The ref clock is enabled by default after power-on reset. The Linux
+	 * clock driver will disable this clock as part of late init if no
+	 * peripheral driver explicitly votes for it, but it also does not
+	 * disable the clock until late init even if a peripheral driver
+	 * explicitly requests it, and the probe cannot be deferred until late
+	 * init. Hence, explicitly disable the clock with a register write to
+	 * allow the QUSB PHY PLL to lock properly.
+	 */
+	if (qphy->ref_clk_base) {
+		writel_relaxed((readl_relaxed(qphy->ref_clk_base) &
+					~QUSB2PHY_REFCLK_ENABLE),
+					qphy->ref_clk_base);
+		/* Make sure that the above write completes to get ref clk OFF */
+		wmb();
+	}
+
+	/* Perform phy reset */
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+	usleep_range(100, 150);
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+	/* Disable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+				PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* configure for ULPI mode if requested */
+	if (qphy->ulpi_mode)
+		writel_relaxed(0x0, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+	/* save reset value to override based on clk scheme */
+	if (qphy->ref_clk_base)
+		reset_val = readl_relaxed(qphy->base + QUSB2PHY_PLL_TEST);
+
+	if (qphy->qusb_phy_init_seq)
+		qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+				qphy->init_seq_len, 0);
+
+	/*
+	 * Check for EFUSE value only if tune2_efuse_reg is available
+	 * and try to read EFUSE value only once i.e. not every USB
+	 * cable connect case.
+	 */
+	if (qphy->tune2_efuse_reg && !qphy->tune2) {
+		if (!qphy->tune2_val)
+			qusb_phy_get_tune2_param(qphy);
+
+		pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
+				qphy->tune2_val);
+		writel_relaxed(qphy->tune2_val,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
+
+	/* If tune modparam set, override tune value */
+	if (qphy->tune1) {
+		writel_relaxed(qphy->tune1,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
+	}
+
+	if (qphy->tune2) {
+		writel_relaxed(qphy->tune2,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
+
+	if (qphy->tune3) {
+		writel_relaxed(qphy->tune3,
+				qphy->base + QUSB2PHY_PORT_TUNE3);
+	}
+
+	if (qphy->tune4) {
+		writel_relaxed(qphy->tune4,
+				qphy->base + QUSB2PHY_PORT_TUNE4);
+	}
+
+	if (qphy->tune5) {
+		writel_relaxed(qphy->tune5,
+				qphy->base + QUSB2PHY_PORT_TUNE5);
+	}
+
+	/* ensure above writes are completed before re-enabling PHY */
+	wmb();
+
+	/* Enable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+				~PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Ensure above write is completed before turning ON ref clk */
+	wmb();
+
+	/* Required for the PHY PLL to lock successfully */
+	usleep_range(150, 160);
+
+	/* Turn on phy ref_clk if DIFF_CLK else select SE_CLK */
+	if (qphy->ref_clk_base) {
+		if (!qphy->is_se_clk) {
+			reset_val &= ~CLK_REF_SEL;
+			writel_relaxed((readl_relaxed(qphy->ref_clk_base) |
+					QUSB2PHY_REFCLK_ENABLE),
+					qphy->ref_clk_base);
+		} else {
+			reset_val |= CLK_REF_SEL;
+			writel_relaxed(reset_val,
+					qphy->base + QUSB2PHY_PLL_TEST);
+		}
+
+		/* Make sure above write is completed to get PLL source clock */
+		wmb();
+
+		/* Required to get PHY PLL lock successfully */
+		usleep_range(100, 110);
+	}
+
+	if (qphy->major_rev < 2) {
+		reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_STATUS);
+		dev_dbg(phy->dev, "QUSB2PHY_PLL_STATUS:%x\n", reg);
+		if (!(reg & QUSB2PHY_PLL_LOCK))
+			pll_lock_fail = true;
+	} else {
+		reg = readb_relaxed(qphy->base +
+				QUSB2PHY_PLL_COMMON_STATUS_ONE);
+		dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+		if (!(reg & CORE_READY_STATUS))
+			pll_lock_fail = true;
+	}
+
+	if (pll_lock_fail)
+		dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+
+	return 0;
+}
+
+static void qusb_phy_shutdown(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/* Disable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+				PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Make sure the above write completes before turning off clocks */
+	wmb();
+
+	qusb_phy_enable_clocks(qphy, false);
+}
+
+/**
+ * qusb_phy_set_suspend() - perform QUSB2 PHY suspend/resume
+ * @phy: usb phy pointer
+ * @suspend: 1 to enter suspend, 0 to resume
+ */
+static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	u32 linestate = 0, intr_mask = 0;
+
+	if (qphy->suspended == suspend) {
+		dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
+			__func__);
+		return 0;
+	}
+
+	if (suspend) {
+		/* Bus suspend case */
+		if (qphy->cable_connected) {
+			/* Clear all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			linestate = readl_relaxed(qphy->base +
+					QUSB2PHY_PORT_UTMI_STATUS);
+
+			/*
+			 * D+/D- interrupts are level-triggered, but we are
+			 * only interested if the line state changes, so enable
+			 * the high/low trigger based on current state. In
+			 * other words, enable the triggers _opposite_ of what
+			 * the current D+/D- levels are.
+			 * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
+			 * configure the mask to trigger on D+ low OR D- high
+			 */
+			intr_mask = DPSE_INTR_EN | DMSE_INTR_EN;
+			if (!(linestate & LINESTATE_DP)) /* D+ low */
+				intr_mask |= DPSE_INTR_HIGH_SEL;
+			if (!(linestate & LINESTATE_DM)) /* D- low */
+				intr_mask |= DMSE_INTR_HIGH_SEL;
+
+			writel_relaxed(intr_mask,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+				/* enable phy auto-resume */
+				writel_relaxed(0x0C,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+				/* flush the previous write before next write */
+				wmb();
+				writel_relaxed(0x04,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+			}
+
+			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+					__func__, intr_mask);
+
+			/* Make sure that the above write goes through */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+		} else { /* Disconnect case */
+			mutex_lock(&qphy->phy_lock);
+			/* Disable all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			/* Disable PHY */
+			writel_relaxed(POWER_DOWN | readl_relaxed(qphy->base +
+					QUSB2PHY_PORT_POWERDOWN),
+					qphy->base + QUSB2PHY_PORT_POWERDOWN);
+			/* Make sure that above write is completed */
+			wmb();
+
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
+			qusb_phy_enable_clocks(qphy, false);
+			qusb_phy_enable_power(qphy, false);
+			mutex_unlock(&qphy->phy_lock);
+
+			/*
+			 * Set put_into_high_z_state to true so that on the
+			 * next USB cable connect the DPF_DMF request resets
+			 * the PHY and puts it into the high-z state. Boot-up,
+			 * with or without a USB cable, does not require the
+			 * QUSB PHY to be put into the high-z state.
+			 */
+			qphy->put_into_high_z_state = true;
+		}
+		qphy->suspended = true;
+	} else {
+		/* Bus resume case */
+		if (qphy->cable_connected) {
+			qusb_phy_enable_clocks(qphy, true);
+			/* Clear all interrupts on resume */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+		} else {
+			qusb_phy_enable_power(qphy, true);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x1,
+					qphy->tcsr_clamp_dig_n);
+			qusb_phy_enable_clocks(qphy, true);
+		}
+		qphy->suspended = false;
+	}
+
+	return 0;
+}
+
+static int qusb_phy_notify_connect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = true;
+
+	dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_notify_disconnect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = false;
+
+	dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+				__func__, qphy->dpdm_enable);
+
+	mutex_lock(&qphy->phy_lock);
+	if (!qphy->dpdm_enable) {
+		ret = qusb_phy_enable_power(qphy, true);
+		if (ret < 0) {
+			dev_dbg(qphy->phy.dev,
+				"dpdm regulator enable failed:%d\n", ret);
+			mutex_unlock(&qphy->phy_lock);
+			return ret;
+		}
+		qphy->dpdm_enable = true;
+		if (qphy->put_into_high_z_state) {
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x1,
+				qphy->tcsr_clamp_dig_n);
+
+			qusb_phy_gdsc(qphy, true);
+			qusb_phy_enable_clocks(qphy, true);
+
+			dev_dbg(qphy->phy.dev, "RESET QUSB PHY\n");
+			ret = reset_control_assert(qphy->phy_reset);
+			if (ret)
+				dev_err(qphy->phy.dev, "phyassert failed\n");
+			usleep_range(100, 150);
+			ret = reset_control_deassert(qphy->phy_reset);
+			if (ret)
+				dev_err(qphy->phy.dev, "deassert failed\n");
+
+			/*
+			 * The PHY in non-driving mode leaves the D+ and D-
+			 * lines in the high-Z state. Controller power
+			 * collapse does not switch the PHY to non-driving
+			 * mode, causing charger detection to fail. Bring the
+			 * PHY to non-driving mode by overriding the
+			 * controller output via the UTMI interface.
+			 */
+			writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
+				OP_MODE_NON_DRIVE,
+				qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+			writel_relaxed(UTMI_ULPI_SEL |
+				UTMI_TEST_MUX_SEL,
+				qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+			/* Disable PHY */
+			writel_relaxed(CLAMP_N_EN | FREEZIO_N |
+					POWER_DOWN,
+					qphy->base + QUSB2PHY_PORT_POWERDOWN);
+			/* Make sure that above write is completed */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+			qusb_phy_gdsc(qphy, false);
+		}
+	}
+	mutex_unlock(&qphy->phy_lock);
+
+	return ret;
+}
+
+static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+				__func__, qphy->dpdm_enable);
+
+	mutex_lock(&qphy->phy_lock);
+	if (qphy->dpdm_enable) {
+		/* If usb core is active, rely on set_suspend to clamp phy */
+		if (!qphy->cable_connected) {
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0,
+					qphy->tcsr_clamp_dig_n);
+		}
+		ret = qusb_phy_enable_power(qphy, false);
+		if (ret < 0) {
+			dev_dbg(qphy->phy.dev,
+				"dpdm regulator disable failed:%d\n", ret);
+			mutex_unlock(&qphy->phy_lock);
+			return ret;
+		}
+		qphy->dpdm_enable = false;
+	}
+	mutex_unlock(&qphy->phy_lock);
+
+	return ret;
+}
+
+static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
+					qphy->dpdm_enable);
+	return qphy->dpdm_enable;
+}
+
+static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
+	.enable		= qusb_phy_dpdm_regulator_enable,
+	.disable	= qusb_phy_dpdm_regulator_disable,
+	.is_enabled	= qusb_phy_dpdm_regulator_is_enabled,
+};
+
+static int qusb_phy_regulator_init(struct qusb_phy *qphy)
+{
+	struct device *dev = qphy->phy.dev;
+	struct regulator_config cfg = {};
+	struct regulator_init_data *init_data;
+
+	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+	if (!init_data)
+		return -ENOMEM;
+
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+	qphy->dpdm_rdesc.owner = THIS_MODULE;
+	qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+	qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
+	qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+	cfg.dev = dev;
+	cfg.init_data = init_data;
+	cfg.driver_data = qphy;
+	cfg.of_node = dev->of_node;
+
+	qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
+	return PTR_ERR_OR_ZERO(qphy->dpdm_rdev);
+}
+
+static void qusb_phy_create_debugfs(struct qusb_phy *qphy)
+{
+	qphy->root = debugfs_create_dir(dev_name(qphy->phy.dev), NULL);
+	debugfs_create_x8("tune1", 0644, qphy->root, &qphy->tune1);
+	debugfs_create_x8("tune2", 0644, qphy->root, &qphy->tune2);
+	debugfs_create_x8("tune3", 0644, qphy->root, &qphy->tune3);
+	debugfs_create_x8("tune4", 0644, qphy->root, &qphy->tune4);
+	debugfs_create_x8("tune5", 0644, qphy->root, &qphy->tune5);
+}
+
+static int qusb_phy_probe(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0;
+	const char *phy_type;
+	bool hold_phy_reset;
+	u32 temp;
+
+	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+	if (!qphy)
+		return -ENOMEM;
+
+	qphy->phy.dev = dev;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"qusb_phy_base");
+	qphy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qphy->base))
+		return PTR_ERR(qphy->base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"tune2_efuse_addr");
+	if (res) {
+		qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+							resource_size(res));
+		if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+			ret = of_property_read_u32(dev->of_node,
+					"qcom,tune2-efuse-bit-pos",
+					&qphy->tune2_efuse_bit_pos);
+			if (!ret) {
+				ret = of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-num-bits",
+						&qphy->tune2_efuse_num_of_bits);
+			}
+			of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-correction",
+						&qphy->tune2_efuse_correction);
+
+			if (ret) {
+				dev_err(dev, "DT Value for tune2 efuse is invalid.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"ref_clk_addr");
+	if (res) {
+		qphy->ref_clk_base = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		if (!qphy->ref_clk_base) {
+			dev_dbg(dev, "failed to map ref_clk_addr\n");
+			return -ENOMEM;
+		}
+
+		ret = of_property_read_string(dev->of_node,
+				"qcom,phy-clk-scheme", &phy_type);
+		if (ret) {
+			dev_err(dev, "error need qsub_phy_clk_scheme.\n");
+			return ret;
+		}
+
+		if (!strcasecmp(phy_type, "cml")) {
+			qphy->is_se_clk = false;
+		} else if (!strcasecmp(phy_type, "cmos")) {
+			qphy->is_se_clk = true;
+		} else {
+			dev_err(dev, "erro invalid qusb_phy_clk_scheme\n");
+			return -EINVAL;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"tcsr_clamp_dig_n_1p8");
+	if (res) {
+		qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		if (!qphy->tcsr_clamp_dig_n)
+			dev_err(dev, "failed to map tcsr_clamp_dig_n\n");
+	}
+
+	ret = of_property_read_u32(dev->of_node, "qcom,usb-hs-ac-bitmask",
+					&qphy->usb_hs_ac_bitmask);
+	if (!ret) {
+		ret = of_property_read_u32(dev->of_node, "qcom,usb-hs-ac-value",
+						&qphy->usb_hs_ac_value);
+		if (ret) {
+			dev_err(dev, "usb_hs_ac_value not passed\n", __func__);
+			return ret;
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"tcsr_conn_box_spare_0");
+		if (!res) {
+			dev_err(dev, "tcsr_conn_box_spare_0 not passed\n",
+								__func__);
+			return -ENOENT;
+		}
+
+		qphy->tcsr_conn_box_spare = devm_ioremap_nocache(dev,
+						res->start, resource_size(res));
+		if (!qphy->tcsr_conn_box_spare) {
+			dev_err(dev, "failed to map tcsr_conn_box_spare\n");
+			return -ENOMEM;
+		}
+	}
+
+	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(qphy->ref_clk_src))
+		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+
+	/* ref_clk is needed only for DIFF_CLK case, hence make it optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "ref_clk") >= 0) {
+		qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+		if (IS_ERR(qphy->ref_clk)) {
+			ret = PTR_ERR(qphy->ref_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_dbg(dev,
+					"clk get failed for ref_clk\n");
+			return ret;
+		}
+
+		clk_set_rate(qphy->ref_clk, 19200000);
+	}
+
+	qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+	if (IS_ERR(qphy->cfg_ahb_clk))
+		return PTR_ERR(qphy->cfg_ahb_clk);
+
+	qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(qphy->phy_reset))
+		return PTR_ERR(qphy->phy_reset);
+
+	if (of_property_match_string(dev->of_node,
+		"clock-names", "iface_clk") >= 0) {
+		qphy->iface_clk = devm_clk_get(dev, "iface_clk");
+		if (IS_ERR(qphy->iface_clk)) {
+			ret = PTR_ERR(qphy->iface_clk);
+			qphy->iface_clk = NULL;
+			if (ret == -EPROBE_DEFER)
+				return ret;
+			dev_err(dev, "couldn't get iface_clk(%d)\n", ret);
+		}
+	}
+
+	if (of_property_match_string(dev->of_node,
+		"clock-names", "core_clk") >= 0) {
+		qphy->core_clk = devm_clk_get(dev, "core_clk");
+		if (IS_ERR(qphy->core_clk)) {
+			ret = PTR_ERR(qphy->core_clk);
+			qphy->core_clk = NULL;
+			if (ret == -EPROBE_DEFER)
+				return ret;
+			dev_err(dev, "couldn't get core_clk(%d)\n", ret);
+		}
+	}
+
+	qphy->gdsc = devm_regulator_get(dev, "USB3_GDSC");
+	if (IS_ERR(qphy->gdsc))
+		qphy->gdsc = NULL;
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
+	if (size) {
+		qphy->qusb_phy_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->qusb_phy_init_seq) {
+			qphy->init_seq_len =
+				(size / sizeof(*qphy->qusb_phy_init_seq));
+			if (qphy->init_seq_len % 2) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qusb-phy-init-seq",
+				qphy->qusb_phy_init_seq,
+				qphy->init_seq_len);
+		} else {
+			dev_err(dev, "error allocating memory for phy_init_seq\n");
+		}
+	}
+
+	qphy->ulpi_mode = false;
+	ret = of_property_read_string(dev->of_node, "phy_type", &phy_type);
+
+	if (!ret) {
+		if (!strcasecmp(phy_type, "ulpi"))
+			qphy->ulpi_mode = true;
+	} else {
+		dev_err(dev, "error reading phy_type property\n");
+		return ret;
+	}
+
+	hold_phy_reset = of_property_read_bool(dev->of_node, "qcom,hold-reset");
+
+	/* use default major revision as 2 */
+	qphy->major_rev = 2;
+	ret = of_property_read_u32(dev->of_node, "qcom,major-rev",
+						&qphy->major_rev);
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+					 (u32 *) qphy->vdd_levels,
+					 ARRAY_SIZE(qphy->vdd_levels));
+	if (ret) {
+		dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+		return ret;
+	}
+
+	qphy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(qphy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		return PTR_ERR(qphy->vdd);
+	}
+
+	qphy->vdda33 = devm_regulator_get(dev, "vdda33");
+	if (IS_ERR(qphy->vdda33)) {
+		dev_err(dev, "unable to get vdda33 supply\n");
+		return PTR_ERR(qphy->vdda33);
+	}
+
+	qphy->vdda18 = devm_regulator_get(dev, "vdda18");
+	if (IS_ERR(qphy->vdda18)) {
+		dev_err(dev, "unable to get vdda18 supply\n");
+		return PTR_ERR(qphy->vdda18);
+	}
+
+	mutex_init(&qphy->phy_lock);
+	platform_set_drvdata(pdev, qphy);
+
+	qphy->phy.label			= "msm-qusb-phy";
+	qphy->phy.init			= qusb_phy_init;
+	qphy->phy.set_suspend           = qusb_phy_set_suspend;
+	qphy->phy.shutdown		= qusb_phy_shutdown;
+	qphy->phy.type			= USB_PHY_TYPE_USB2;
+	qphy->phy.notify_connect        = qusb_phy_notify_connect;
+	qphy->phy.notify_disconnect     = qusb_phy_notify_disconnect;
+
+	/*
+	 * On some platforms multiple QUSB PHYs are available. If a QUSB PHY is
+	 * not used, leakage current is seen on its related voltage rail.
+	 * Hence explicitly keep the unused QUSB PHY in reset here.
+	 */
+	if (hold_phy_reset) {
+		ret = reset_control_assert(qphy->phy_reset);
+		if (ret)
+			dev_err(dev, "%s:phy_reset assert failed\n", __func__);
+	}
+
+	ret = usb_add_phy_dev(&qphy->phy);
+	if (ret)
+		return ret;
+
+	ret = qusb_phy_regulator_init(qphy);
+	if (ret)
+		usb_remove_phy(&qphy->phy);
+
+	/* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */
+	if (qphy->tcsr_clamp_dig_n)
+		writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
+	/*
+	 * Write the usb_hs_ac_value to usb_hs_ac_bitmask of tcsr_conn_box_spare
+	 * reg to enable AC/DC coupling
+	 */
+	if (qphy->tcsr_conn_box_spare) {
+		temp = readl_relaxed(qphy->tcsr_conn_box_spare) &
+						~qphy->usb_hs_ac_bitmask;
+		writel_relaxed(temp | qphy->usb_hs_ac_value,
+						qphy->tcsr_conn_box_spare);
+	}
+
+	qphy->suspended = true;
+
+	qusb_phy_create_debugfs(qphy);
+
+	return ret;
+}
+
+static int qusb_phy_remove(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(qphy->root);
+	usb_remove_phy(&qphy->phy);
+	qphy->cable_connected = false;
+	qusb_phy_set_suspend(&qphy->phy, true);
+
+	return 0;
+}
+
+static const struct of_device_id qusb_phy_id_table[] = {
+	{ .compatible = "qcom,qusb2phy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
+
+static struct platform_driver qusb_phy_driver = {
+	.probe		= qusb_phy_probe,
+	.remove		= qusb_phy_remove,
+	.driver = {
+		.name	= "msm-qusb-phy",
+		.of_match_table = of_match_ptr(qusb_phy_id_table),
+	},
+};
+
+module_platform_driver(qusb_phy_driver);
+
+MODULE_DESCRIPTION("MSM QUSB2 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e0a4749..56f572c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
+	/* Motorola devices */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },	/* mdm6600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },	/* mdm9600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },	/* mdm ram dl */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },	/* mdm qc dl */
 
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
 	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },	/* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff),			/* D-Link DWM-222 A2 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },	/* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/A3 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),			/* Olicard 600 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff),			/* BroadMobi BM818 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },			/* OLICARD300 - MT6225 */
 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 3457c1f..5f29ce8 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -378,7 +378,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
 			return SNK_UNATTACHED;
 		else if (port->try_role == TYPEC_SOURCE)
 			return SRC_UNATTACHED;
-		else if (port->tcpc->config->default_role == TYPEC_SINK)
+		else if (port->tcpc->config &&
+			 port->tcpc->config->default_role == TYPEC_SINK)
 			return SNK_UNATTACHED;
 		/* Fall through to return SRC_UNATTACHED */
 	} else if (port->port_type == TYPEC_PORT_SNK) {
@@ -585,7 +586,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
 
 static void tcpm_debugfs_exit(struct tcpm_port *port)
 {
+	int i;
+
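+	/* free any buffered log entries before the debugfs entry goes away */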
+	mutex_lock(&port->logbuffer_lock);
+	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
+		kfree(port->logbuffer[i]);
+		port->logbuffer[i] = NULL;
+	}
+	mutex_unlock(&port->logbuffer_lock);
+
 	debugfs_remove(port->dentry);
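+	/* drop the shared "tcpm" rootdir once the last port is gone */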
+	if (list_empty(&rootdir->d_subdirs)) {
+		debugfs_remove(rootdir);
+		rootdir = NULL;
+	}
 }
 
 #else
@@ -1094,7 +1108,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
 			break;
 		case CMD_ATTENTION:
 			/* Attention command does not have response */
-			typec_altmode_attention(adev, p[1]);
+			if (adev)
+				typec_altmode_attention(adev, p[1]);
 			return 0;
 		default:
 			break;
@@ -1146,20 +1161,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
 			}
 			break;
 		case CMD_ENTER_MODE:
-			typec_altmode_update_active(pdev, true);
+			if (adev && pdev) {
+				typec_altmode_update_active(pdev, true);
 
-			if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
-				response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
-				response[0] |= VDO_OPOS(adev->mode);
-				return 1;
+				if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+					response[0] = VDO(adev->svid, 1,
+							  CMD_EXIT_MODE);
+					response[0] |= VDO_OPOS(adev->mode);
+					return 1;
+				}
 			}
 			return 0;
 		case CMD_EXIT_MODE:
-			typec_altmode_update_active(pdev, false);
+			if (adev && pdev) {
+				typec_altmode_update_active(pdev, false);
 
-			/* Back to USB Operation */
-			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
-						     NULL));
+				/* Back to USB Operation */
+				WARN_ON(typec_altmode_notify(adev,
+							     TYPEC_STATE_USB,
+							     NULL));
+			}
 			break;
 		default:
 			break;
@@ -1169,8 +1190,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
 		switch (cmd) {
 		case CMD_ENTER_MODE:
 			/* Back to USB Operation */
-			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
-						     NULL));
+			if (adev)
+				WARN_ON(typec_altmode_notify(adev,
+							     TYPEC_STATE_USB,
+							     NULL));
 			break;
 		default:
 			break;
@@ -1181,7 +1204,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
 	}
 
 	/* Informing the alternate mode drivers about everything */
-	typec_altmode_vdm(adev, p[0], &p[1], cnt);
+	if (adev)
+		typec_altmode_vdm(adev, p[0], &p[1], cnt);
 
 	return rlen;
 }
@@ -4083,7 +4107,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
 	mutex_lock(&port->lock);
 	if (tcpc->try_role)
 		ret = tcpc->try_role(tcpc, role);
-	if (!ret && !tcpc->config->try_role_hw)
+	if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
 		port->try_role = role;
 	port->try_src_count = 0;
 	port->try_snk_count = 0;
@@ -4730,7 +4754,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
 	port->typec_caps.prefer_role = tcfg->default_role;
 	port->typec_caps.type = tcfg->type;
 	port->typec_caps.data = tcfg->data;
-	port->self_powered = port->tcpc->config->self_powered;
+	port->self_powered = tcfg->self_powered;
 
 	return 0;
 }
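
All of the tcpm.c hunks above apply one rule: port->tcpc->config and the altmode handles (adev, pdev) are optional and may be NULL, so every use must be guarded. A minimal sketch of the guard pattern; default_role_fallback() is a hypothetical helper, not driver code:

	static enum typec_role default_role_fallback(struct tcpm_port *port)
	{
		/* config is optional: default to source when it is absent */
		if (port->tcpc->config &&
		    port->tcpc->config->default_role == TYPEC_SINK)
			return TYPEC_SINK;
		return TYPEC_SOURCE;
	}
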
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e..e569413 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
 {
 	int err;
 	u16 old_value;
-	pci_power_t new_state, old_state;
+	pci_power_t new_state;
 
 	err = pci_read_config_word(dev, offset, &old_value);
 	if (err)
 		goto out;
 
-	old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
 	new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
 
 	new_value &= PM_OK_BITS;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ac6c383..1985565 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1485,7 +1485,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
 		goto out;
 	}
 
-	trans = btrfs_attach_transaction(root);
+	trans = btrfs_join_transaction_nostart(root);
 	if (IS_ERR(trans)) {
 		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
 			ret = PTR_ERR(trans);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f1ca53a..26317bc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
 	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
 	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
 					   __TRANS_ATTACH |
-					   __TRANS_JOIN),
+					   __TRANS_JOIN |
+					   __TRANS_JOIN_NOSTART),
 	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
 					   __TRANS_ATTACH |
 					   __TRANS_JOIN |
-					   __TRANS_JOIN_NOLOCK),
+					   __TRANS_JOIN_NOLOCK |
+					   __TRANS_JOIN_NOSTART),
 	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
 					   __TRANS_ATTACH |
 					   __TRANS_JOIN |
-					   __TRANS_JOIN_NOLOCK),
+					   __TRANS_JOIN_NOLOCK |
+					   __TRANS_JOIN_NOSTART),
 };
 
 void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -531,7 +534,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 		ret = join_transaction(fs_info, type);
 		if (ret == -EBUSY) {
 			wait_current_trans(fs_info);
-			if (unlikely(type == TRANS_ATTACH))
+			if (unlikely(type == TRANS_ATTACH ||
+				     type == TRANS_JOIN_NOSTART))
 				ret = -ENOENT;
 		}
 	} while (ret == -EBUSY);
@@ -648,6 +652,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
 }
 
 /*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+				 BTRFS_RESERVE_NO_FLUSH, true);
+}
+
+/*
  * btrfs_attach_transaction() - catch the running transaction
  *
  * It is used when we want to commit the current transaction, but
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 4cbb1b5..c1d34cc 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -97,11 +97,13 @@ struct btrfs_transaction {
 #define __TRANS_JOIN		(1U << 11)
 #define __TRANS_JOIN_NOLOCK	(1U << 12)
 #define __TRANS_DUMMY		(1U << 13)
+#define __TRANS_JOIN_NOSTART	(1U << 14)
 
 #define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
 #define TRANS_ATTACH		(__TRANS_ATTACH)
 #define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
 #define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)
 
 #define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)
 
@@ -187,6 +189,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
 					int min_factor);
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
 					struct btrfs_root *root);
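
Like btrfs_attach_transaction(), the new nostart variant reports -ENOENT instead of creating a transaction, so callers that only want to piggy-back on a running transaction can treat that as "nothing to join". A caller sketch mirroring the btrfs_check_shared() hunk above:

	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);
		trans = NULL;	/* no transaction running */
	}
	/* do the work, with or without a transaction */
	if (trans)
		btrfs_end_transaction(trans);
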
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9c332a6..476728b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ static int ceph_writepages_start(struct address_space *mapping,
 			if (page_offset(page) >= ceph_wbc.i_size) {
 				dout("%p page eof %llu\n",
 				     page, ceph_wbc.i_size);
-				if (ceph_wbc.size_stable ||
-				    page_offset(page) >= i_size_read(inode))
+				if ((ceph_wbc.size_stable ||
+				    page_offset(page) >= i_size_read(inode)) &&
+				    clear_page_dirty_for_io(page))
 					mapping->a_ops->invalidatepage(page,
 								0, PAGE_SIZE);
 				unlock_page(page);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 9dae2ec..6a8f4a9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 		req->r_wait_for_completion = ceph_lock_wait_for_completion;
 
 	err = ceph_mdsc_do_request(mdsc, inode, req);
-
-	if (operation == CEPH_MDS_OP_GETFILELOCK) {
+	if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
 		fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
 		if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
 			fl->fl_type = F_RDLCK;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0ccf8f9..cc9e846 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2545,7 +2545,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
 static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
 				   unsigned int buflen)
 {
-	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+	struct page *page;
+
+	/*
+	 * VMAP_STACK (at least) puts the stack into the vmalloc address space
+	 */
+	if (is_vmalloc_addr(buf))
+		page = vmalloc_to_page(buf);
+	else
+		page = virt_to_page(buf);
+	sg_set_page(sg, page, buflen, offset_in_page(buf));
 }
 
 /* Assumes the first rqst has a transform header as the first iov.
@@ -3121,7 +3129,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
 {
 	int ret, length;
 	char *buf = server->smallbuf;
-	char *tmpbuf;
 	struct smb2_sync_hdr *shdr;
 	unsigned int pdu_length = server->pdu_size;
 	unsigned int buf_size;
@@ -3151,18 +3158,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
 		return length;
 
 	next_is_large = server->large_buf;
- one_more:
+one_more:
 	shdr = (struct smb2_sync_hdr *)buf;
 	if (shdr->NextCommand) {
-		if (next_is_large) {
-			tmpbuf = server->bigbuf;
+		if (next_is_large)
 			next_buffer = (char *)cifs_buf_get();
-		} else {
-			tmpbuf = server->smallbuf;
+		else
 			next_buffer = (char *)cifs_small_buf_get();
-		}
 		memcpy(next_buffer,
-		       tmpbuf + le32_to_cpu(shdr->NextCommand),
+		       buf + le32_to_cpu(shdr->NextCommand),
 		       pdu_length - le32_to_cpu(shdr->NextCommand));
 	}
 
@@ -3191,12 +3195,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
 		pdu_length -= le32_to_cpu(shdr->NextCommand);
 		server->large_buf = next_is_large;
 		if (next_is_large)
-			server->bigbuf = next_buffer;
+			server->bigbuf = buf = next_buffer;
 		else
-			server->smallbuf = next_buffer;
-
-		buf += le32_to_cpu(shdr->NextCommand);
+			server->smallbuf = buf = next_buffer;
 		goto one_more;
+	} else if (ret != 0) {
+		/*
+		 * ret != 0 here means that we didn't get to handle_mid(), thus
+		 * server->smallbuf and server->bigbuf are still valid. We need
+		 * to free next_buffer because it is not going to be used
+		 * anywhere.
+		 */
+		if (next_is_large)
+			free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
+		else
+			free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
 	}
 
 	return ret;
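
The smb2_sg_set_buf() change matters because with CONFIG_VMAP_STACK the on-stack buffers passed in live in the vmalloc area, where virt_to_page() is invalid. The lookup reduces to this standalone helper (a sketch restating the logic above, not extra driver code):

	static struct page *buf_to_page(const void *buf)
	{
		if (is_vmalloc_addr(buf))	/* e.g. VMAP_STACK stacks */
			return vmalloc_to_page(buf);
		return virt_to_page(buf);
	}
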
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c181f16..2bc47eb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -168,7 +168,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
 	if (tcon == NULL)
 		return 0;
 
-	if (smb2_command == SMB2_TREE_CONNECT)
+	if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
 		return 0;
 
 	if (tcon->tidStatus == CifsExiting) {
@@ -1006,7 +1006,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
 	else
 		req->SecurityMode = 0;
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
+	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+#else
 	req->Capabilities = 0;
+#endif /* CONFIG_CIFS_DFS_UPCALL */
+
 	req->Channel = 0; /* MBZ */
 
 	sess_data->iov[0].iov_base = (char *)req;
diff --git a/fs/dax.c b/fs/dax.c
index 75a289c..f0d932f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -659,7 +659,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	 * guaranteed to either see new references or prevent new
 	 * references from being established.
 	 */
-	unmap_mapping_range(mapping, 0, 0, 1);
+	unmap_mapping_range(mapping, 0, 0, 0);
 
 	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
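
For reference, the final argument of unmap_mapping_range() is even_cows; passing 0 leaves private COW pages mapped, which is the point of the fix: dax_layout_busy_page() only needs to shoot down the shared DAX mappings, and COW pages are not DAX pages. The signature, from mm/memory.c:

	/* void unmap_mapping_range(struct address_space *mapping,
	 *			    loff_t const holebegin, loff_t const holelen,
	 *			    int even_cows); */
	unmap_mapping_range(mapping, 0, 0, 0);	/* whole file, skip COW pages */
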
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 7f8bb08..d14d71d 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -392,6 +392,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
 	return mp->mp_aheight - x - 1;
 }
 
+static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
+{
+	sector_t factor = 1, block = 0;
+	int hgt;
+
+	for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
+		if (hgt < mp->mp_aheight)
+			block += mp->mp_list[hgt] * factor;
+		factor *= sdp->sd_inptrs;
+	}
+	return block;
+}
+
 static void release_metapath(struct metapath *mp)
 {
 	int i;
@@ -432,60 +445,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
 	return ptr - first;
 }
 
-typedef const __be64 *(*gfs2_metadata_walker)(
-		struct metapath *mp,
-		const __be64 *start, const __be64 *end,
-		u64 factor, void *data);
+enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
 
-#define WALK_STOP ((__be64 *)0)
-#define WALK_NEXT ((__be64 *)1)
+/*
+ * gfs2_metadata_walker - walk an indirect block
+ * @mp: Metapath to indirect block
+ * @ptrs: Number of pointers to look at
+ *
+ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
+ * indirect block to follow.
+ */
+typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
+						   unsigned int ptrs);
 
-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
-		u64 len, struct metapath *mp, gfs2_metadata_walker walker,
-		void *data)
+/*
+ * gfs2_walk_metadata - walk a tree of indirect blocks
+ * @inode: The inode
+ * @mp: Starting point of walk
+ * @max_len: Maximum number of blocks to walk
+ * @walker: Called during the walk
+ *
+ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
+ * past the end of metadata, and a negative error code otherwise.
+ */
+
+static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
+		u64 max_len, gfs2_metadata_walker walker)
 {
-	struct metapath clone;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	const __be64 *start, *end, *ptr;
 	u64 factor = 1;
 	unsigned int hgt;
-	int ret = 0;
+	int ret;
 
-	for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
+	/*
+	 * The walk starts in the lowest allocated indirect block, which may be
+	 * before the position indicated by @mp.  Adjust @max_len accordingly
+	 * to avoid a short walk.
+	 */
+	for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
+		max_len += mp->mp_list[hgt] * factor;
+		mp->mp_list[hgt] = 0;
 		factor *= sdp->sd_inptrs;
+	}
 
 	for (;;) {
-		u64 step;
+		u16 start = mp->mp_list[hgt];
+		enum walker_status status;
+		unsigned int ptrs;
+		u64 len;
 
 		/* Walk indirect block. */
-		start = metapointer(hgt, mp);
-		end = metaend(hgt, mp);
-
-		step = (end - start) * factor;
-		if (step > len)
-			end = start + DIV_ROUND_UP_ULL(len, factor);
-
-		ptr = walker(mp, start, end, factor, data);
-		if (ptr == WALK_STOP)
+		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
+		len = ptrs * factor;
+		if (len > max_len)
+			ptrs = DIV_ROUND_UP_ULL(max_len, factor);
+		status = walker(mp, ptrs);
+		switch (status) {
+		case WALK_STOP:
+			return 1;
+		case WALK_FOLLOW:
+			BUG_ON(mp->mp_aheight == mp->mp_fheight);
+			ptrs = mp->mp_list[hgt] - start;
+			len = ptrs * factor;
 			break;
-		if (step >= len)
+		case WALK_CONTINUE:
 			break;
-		len -= step;
-		if (ptr != WALK_NEXT) {
-			BUG_ON(!*ptr);
-			mp->mp_list[hgt] += ptr - start;
-			goto fill_up_metapath;
 		}
+		if (len >= max_len)
+			break;
+		max_len -= len;
+		if (status == WALK_FOLLOW)
+			goto fill_up_metapath;
 
 lower_metapath:
 		/* Decrease height of metapath. */
-		if (mp != &clone) {
-			clone_metapath(&clone, mp);
-			mp = &clone;
-		}
 		brelse(mp->mp_bh[hgt]);
 		mp->mp_bh[hgt] = NULL;
+		mp->mp_list[hgt] = 0;
 		if (!hgt)
 			break;
 		hgt--;
@@ -493,10 +530,7 @@ static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
 
 		/* Advance in metadata tree. */
 		(mp->mp_list[hgt])++;
-		start = metapointer(hgt, mp);
-		end = metaend(hgt, mp);
-		if (start >= end) {
-			mp->mp_list[hgt] = 0;
+		if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
 			if (!hgt)
 				break;
 			goto lower_metapath;
@@ -504,44 +538,36 @@ static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
 
 fill_up_metapath:
 		/* Increase height of metapath. */
-		if (mp != &clone) {
-			clone_metapath(&clone, mp);
-			mp = &clone;
-		}
 		ret = fillup_metapath(ip, mp, ip->i_height - 1);
 		if (ret < 0)
-			break;
+			return ret;
 		hgt += ret;
 		for (; ret; ret--)
 			do_div(factor, sdp->sd_inptrs);
 		mp->mp_aheight = hgt + 1;
 	}
-	if (mp == &clone)
-		release_metapath(mp);
-	return ret;
+	return 0;
 }
 
-struct gfs2_hole_walker_args {
-	u64 blocks;
-};
-
-static const __be64 *gfs2_hole_walker(struct metapath *mp,
-		const __be64 *start, const __be64 *end,
-		u64 factor, void *data)
+static enum walker_status gfs2_hole_walker(struct metapath *mp,
+					   unsigned int ptrs)
 {
-	struct gfs2_hole_walker_args *args = data;
-	const __be64 *ptr;
+	const __be64 *start, *ptr, *end;
+	unsigned int hgt;
+
+	hgt = mp->mp_aheight - 1;
+	start = metapointer(hgt, mp);
+	end = start + ptrs;
 
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr) {
-			args->blocks += (ptr - start) * factor;
+			mp->mp_list[hgt] += ptr - start;
 			if (mp->mp_aheight == mp->mp_fheight)
 				return WALK_STOP;
-			return ptr;  /* increase height */
+			return WALK_FOLLOW;
 		}
 	}
-	args->blocks += (end - start) * factor;
-	return WALK_NEXT;
+	return WALK_CONTINUE;
 }
 
 /**
@@ -559,12 +585,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
 			  struct metapath *mp, struct iomap *iomap)
 {
-	struct gfs2_hole_walker_args args = { };
-	int ret = 0;
+	struct metapath clone;
+	u64 hole_size;
+	int ret;
 
-	ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
-	if (!ret)
-		iomap->length = args.blocks << inode->i_blkbits;
+	clone_metapath(&clone, mp);
+	ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
+	if (ret < 0)
+		goto out;
+
+	if (ret == 1)
+		hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
+	else
+		hole_size = len;
+	iomap->length = hole_size << inode->i_blkbits;
+	ret = 0;
+
+out:
+	release_metapath(&clone);
 	return ret;
 }
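
metapath_to_block() reads mp_list[] as the digits of a base-sd_inptrs number: block = sum over the allocated heights of mp_list[hgt] * sd_inptrs^(mp_fheight - 1 - hgt). A worked example, assuming sd_inptrs == 509 (a typical value for 4 KiB blocks; illustrative only):

	/* mp_fheight == 3, mp_list == { 2, 7, 41 }, all heights allocated:
	 *   block = 2 * 509^2 + 7 * 509 + 41
	 *         = 518162 + 3563 + 41
	 *         = 521766
	 */
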
 
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 4dc8878..a7bc4e0 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -118,6 +118,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
 	struct rb_node **p, *parent;
 	int diff;
 
+	nfss->fscache_key = NULL;
+	nfss->fscache = NULL;
+	if (!(nfss->options & NFS_OPTION_FSCACHE))
+		return;
 	if (!uniq) {
 		uniq = "";
 		ulen = 1;
@@ -230,10 +234,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
 void nfs_fscache_init_inode(struct inode *inode)
 {
 	struct nfs_fscache_inode_auxdata auxdata;
+	struct nfs_server *nfss = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 
 	nfsi->fscache = NULL;
-	if (!S_ISREG(inode->i_mode))
+	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
 		return;
 
 	memset(&auxdata, 0, sizeof(auxdata));
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 161ba2e..6363ea9 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -186,7 +186,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
  */
 static inline const char *nfs_server_fscache_state(struct nfs_server *server)
 {
-	if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
+	if (server->fscache)
 		return "yes";
 	return "no ";
 }
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 63287d9..5b61520 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -469,7 +469,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
 
 extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
 extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
 extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct nfs4_state *, fmode_t);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 8f53455..86991bc 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -754,9 +754,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
 
 static void nfs4_destroy_server(struct nfs_server *server)
 {
+	LIST_HEAD(freeme);
+
 	nfs_server_return_all_delegations(server);
 	unset_pnfs_layoutdriver(server);
-	nfs4_purge_state_owners(server);
+	nfs4_purge_state_owners(server, &freeme);
+	nfs4_free_state_owners(&freeme);
 }
 
 /*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 904e08b..31ae3bd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3133,7 +3133,7 @@ static int _nfs4_do_setattr(struct inode *inode,
 
 	if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
 		/* Use that stateid */
-	} else if (ctx != NULL) {
+	} else if (ctx != NULL && ctx->state) {
 		struct nfs_lock_context *l_ctx;
 		if (!nfs4_valid_open_stateid(ctx->state))
 			return -EBADF;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3ba2087..c36ef75 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -628,24 +628,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 /**
  * nfs4_purge_state_owners - Release all cached state owners
  * @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
  *
  * Called at umount time.  Remaining state owners will be on
  * the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
  */
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *tmp;
-	LIST_HEAD(doomed);
 
 	spin_lock(&clp->cl_lock);
 	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
-		list_move(&sp->so_lru, &doomed);
+		list_move(&sp->so_lru, head);
 		nfs4_remove_state_owner_locked(sp);
 	}
 	spin_unlock(&clp->cl_lock);
+}
 
-	list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_free_state_owners - Release a list of cached state owners
+ * @head: the list of state owners to free
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+	struct nfs4_state_owner *sp, *tmp;
+
+	list_for_each_entry_safe(sp, tmp, head, so_lru) {
 		list_del(&sp->so_lru);
 		nfs4_free_state_owner(sp);
 	}
@@ -1843,12 +1858,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
 	struct nfs4_state_owner *sp;
 	struct nfs_server *server;
 	struct rb_node *pos;
+	LIST_HEAD(freeme);
 	int status = 0;
 
 restart:
 	rcu_read_lock();
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
-		nfs4_purge_state_owners(server);
+		nfs4_purge_state_owners(server, &freeme);
 		spin_lock(&clp->cl_lock);
 		for (pos = rb_first(&server->state_owners);
 		     pos != NULL;
@@ -1877,6 +1893,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
 		spin_unlock(&clp->cl_lock);
 	}
 	rcu_read_unlock();
+	nfs4_free_state_owners(&freeme);
 	return 0;
 }
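
Splitting purge from free lets callers drop clp->cl_lock (and, in nfs4_do_reclaim(), the RCU read lock) before the owners are actually released, since freeing may sleep. Usage reduces to the two-phase pattern from nfs4_destroy_server() above:

	LIST_HEAD(freeme);

	nfs4_purge_state_owners(server, &freeme);	/* list built under cl_lock */
	/* locks dropped here */
	nfs4_free_state_owners(&freeme);		/* may now sleep safely */
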
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 6df9b85..d90efde 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
 	    data->acdirmin != nfss->acdirmin / HZ ||
 	    data->acdirmax != nfss->acdirmax / HZ ||
 	    data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+	    (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
 	    data->nfs_server.port != nfss->port ||
 	    data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
 	    !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3a24ce3..c146e12 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3833,7 +3833,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
 	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
 	int low_bucket = 0, bucket, high_bucket;
 	struct ocfs2_xattr_bucket *search;
-	u32 last_hash;
 	u64 blkno, lower_blkno = 0;
 
 	search = ocfs2_xattr_bucket_new(inode);
@@ -3877,8 +3876,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
 		if (xh->xh_count)
 			xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
 
-		last_hash = le32_to_cpu(xe->xe_name_hash);
-
 		/* record lower_blkno which may be the insert place. */
 		lower_blkno = blkno;
 
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 1dea7a8..05e58b5 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		if (seq_has_overflowed(m))
 			goto Eoverflow;
+		p = m->op->next(m, p, &m->index);
 		if (pos + m->count > offset) {
 			m->from = offset - pos;
 			m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		pos += m->count;
 		m->count = 0;
-		p = m->op->next(m, p, &m->index);
 		if (pos == offset)
 			break;
 	}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 7468c96..5a519bc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -884,6 +884,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 	/* len == 0 means wake all */
 	struct userfaultfd_wake_range range = { .len = 0, };
 	unsigned long new_flags;
+	bool still_valid;
 
 	WRITE_ONCE(ctx->released, true);
 
@@ -899,8 +900,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 	 * taking the mmap_sem for writing.
 	 */
 	down_write(&mm->mmap_sem);
-	if (!mmget_still_valid(mm))
-		goto skip_mm;
+	still_valid = mmget_still_valid(mm);
 	prev = NULL;
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		cond_resched();
@@ -911,22 +911,23 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 			continue;
 		}
 		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
-		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
-				 new_flags, vma->anon_vma,
-				 vma->vm_file, vma->vm_pgoff,
-				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX,
-				 vma_get_anon_name(vma));
-		if (prev)
-			vma = prev;
-		else
-			prev = vma;
+		if (still_valid) {
+			prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+					 new_flags, vma->anon_vma,
+					 vma->vm_file, vma->vm_pgoff,
+					 vma_policy(vma),
+					 NULL_VM_UFFD_CTX,
+					 vma_get_anon_name(vma));
+			if (prev)
+				vma = prev;
+			else
+				prev = vma;
+		}
 		vm_write_begin(vma);
 		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 		vm_write_end(vma);
 	}
-skip_mm:
 	up_write(&mm->mmap_sem);
 	mmput(mm);
 wakeup:
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index c6299f8..6410d3e 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -191,6 +191,121 @@ xfs_attr_calc_size(
 	return nblks;
 }
 
+STATIC int
+xfs_attr_try_sf_addname(
+	struct xfs_inode	*dp,
+	struct xfs_da_args	*args)
+{
+	struct xfs_mount	*mp = dp->i_mount;
+	int			error, error2;
+
+	error = xfs_attr_shortform_addname(args);
+	if (error == -ENOSPC)
+		return error;
+
+	/*
+	 * Commit the shortform mods, and we're done.
+	 * NOTE: this is also the error path (EEXIST, etc).
+	 */
+	if (!error && (args->flags & ATTR_KERNOTIME) == 0)
+		xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
+
+	if (mp->m_flags & XFS_MOUNT_WSYNC)
+		xfs_trans_set_sync(args->trans);
+
+	error2 = xfs_trans_commit(args->trans);
+	args->trans = NULL;
+	return error ? error : error2;
+}
+
+/*
+ * Set the attribute specified in @args.
+ */
+int
+xfs_attr_set_args(
+	struct xfs_da_args	*args)
+{
+	struct xfs_inode	*dp = args->dp;
+	struct xfs_buf          *leaf_bp = NULL;
+	int			error;
+
+	/*
+	 * If the attribute list is non-existent or a shortform list,
+	 * upgrade it to a single-leaf-block attribute list.
+	 */
+	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+
+		/*
+		 * Build initial attribute list (if required).
+		 */
+		if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+			xfs_attr_shortform_create(args);
+
+		/*
+		 * Try to add the attr to the attribute list in the inode.
+		 */
+		error = xfs_attr_try_sf_addname(dp, args);
+		if (error != -ENOSPC)
+			return error;
+
+		/*
+		 * It won't fit in the shortform, transform to a leaf block.
+		 * GROT: another possible req'mt for a double-split btree op.
+		 */
+		error = xfs_attr_shortform_to_leaf(args, &leaf_bp);
+		if (error)
+			return error;
+
+		/*
+		 * Prevent the leaf buffer from being unlocked so that a
+		 * concurrent AIL push cannot grab the half-baked leaf
+		 * buffer and run into problems with the write verifier.
+		 * Once we're done rolling the transaction we can release
+		 * the hold and add the attr to the leaf.
+		 */
+		xfs_trans_bhold(args->trans, leaf_bp);
+		error = xfs_defer_finish(&args->trans);
+		xfs_trans_bhold_release(args->trans, leaf_bp);
+		if (error) {
+			xfs_trans_brelse(args->trans, leaf_bp);
+			return error;
+		}
+	}
+
+	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
+		error = xfs_attr_leaf_addname(args);
+	else
+		error = xfs_attr_node_addname(args);
+	return error;
+}
+
+/*
+ * Remove the attribute specified in @args.
+ */
+int
+xfs_attr_remove_args(
+	struct xfs_da_args      *args)
+{
+	struct xfs_inode	*dp = args->dp;
+	int			error;
+
+	if (!xfs_inode_hasattr(dp)) {
+		error = -ENOATTR;
+	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
+		error = xfs_attr_shortform_remove(args);
+	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_removename(args);
+	} else {
+		error = xfs_attr_node_removename(args);
+	}
+
+	return error;
+}
+
 int
 xfs_attr_set(
 	struct xfs_inode	*dp,
@@ -200,11 +315,10 @@ xfs_attr_set(
 	int			flags)
 {
 	struct xfs_mount	*mp = dp->i_mount;
-	struct xfs_buf		*leaf_bp = NULL;
 	struct xfs_da_args	args;
 	struct xfs_trans_res	tres;
 	int			rsvd = (flags & ATTR_ROOT) != 0;
-	int			error, err2, local;
+	int			error, local;
 
 	XFS_STATS_INC(mp, xs_attr_set);
 
@@ -255,93 +369,17 @@ xfs_attr_set(
 	error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
 				rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
 				       XFS_QMOPT_RES_REGBLKS);
-	if (error) {
-		xfs_iunlock(dp, XFS_ILOCK_EXCL);
-		xfs_trans_cancel(args.trans);
-		return error;
-	}
+	if (error)
+		goto out_trans_cancel;
 
 	xfs_trans_ijoin(args.trans, dp, 0);
-
-	/*
-	 * If the attribute list is non-existent or a shortform list,
-	 * upgrade it to a single-leaf-block attribute list.
-	 */
-	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
-	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
-	     dp->i_d.di_anextents == 0)) {
-
-		/*
-		 * Build initial attribute list (if required).
-		 */
-		if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
-			xfs_attr_shortform_create(&args);
-
-		/*
-		 * Try to add the attr to the attribute list in
-		 * the inode.
-		 */
-		error = xfs_attr_shortform_addname(&args);
-		if (error != -ENOSPC) {
-			/*
-			 * Commit the shortform mods, and we're done.
-			 * NOTE: this is also the error path (EEXIST, etc).
-			 */
-			ASSERT(args.trans != NULL);
-
-			/*
-			 * If this is a synchronous mount, make sure that
-			 * the transaction goes to disk before returning
-			 * to the user.
-			 */
-			if (mp->m_flags & XFS_MOUNT_WSYNC)
-				xfs_trans_set_sync(args.trans);
-
-			if (!error && (flags & ATTR_KERNOTIME) == 0) {
-				xfs_trans_ichgtime(args.trans, dp,
-							XFS_ICHGTIME_CHG);
-			}
-			err2 = xfs_trans_commit(args.trans);
-			xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-			return error ? error : err2;
-		}
-
-		/*
-		 * It won't fit in the shortform, transform to a leaf block.
-		 * GROT: another possible req'mt for a double-split btree op.
-		 */
-		error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
-		if (error)
-			goto out;
-		/*
-		 * Prevent the leaf buffer from being unlocked so that a
-		 * concurrent AIL push cannot grab the half-baked leaf
-		 * buffer and run into problems with the write verifier.
-		 */
-		xfs_trans_bhold(args.trans, leaf_bp);
-		error = xfs_defer_finish(&args.trans);
-		if (error)
-			goto out;
-
-		/*
-		 * Commit the leaf transformation.  We'll need another (linked)
-		 * transaction to add the new attribute to the leaf, which
-		 * means that we have to hold & join the leaf buffer here too.
-		 */
-		error = xfs_trans_roll_inode(&args.trans, dp);
-		if (error)
-			goto out;
-		xfs_trans_bjoin(args.trans, leaf_bp);
-		leaf_bp = NULL;
-	}
-
-	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
-		error = xfs_attr_leaf_addname(&args);
-	else
-		error = xfs_attr_node_addname(&args);
+	error = xfs_attr_set_args(&args);
 	if (error)
-		goto out;
+		goto out_trans_cancel;
+	if (!args.trans) {
+		/* shortform attribute has already been committed */
+		goto out_unlock;
+	}
 
 	/*
 	 * If this is a synchronous mount, make sure that the
@@ -358,17 +396,14 @@ xfs_attr_set(
 	 */
 	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
 	error = xfs_trans_commit(args.trans);
+out_unlock:
 	xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
 	return error;
 
-out:
-	if (leaf_bp)
-		xfs_trans_brelse(args.trans, leaf_bp);
+out_trans_cancel:
 	if (args.trans)
 		xfs_trans_cancel(args.trans);
-	xfs_iunlock(dp, XFS_ILOCK_EXCL);
-	return error;
+	goto out_unlock;
 }
 
 /*
@@ -423,17 +458,7 @@ xfs_attr_remove(
 	 */
 	xfs_trans_ijoin(args.trans, dp, 0);
 
-	if (!xfs_inode_hasattr(dp)) {
-		error = -ENOATTR;
-	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
-		error = xfs_attr_shortform_remove(&args);
-	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
-		error = xfs_attr_leaf_removename(&args);
-	} else {
-		error = xfs_attr_node_removename(&args);
-	}
-
+	error = xfs_attr_remove_args(&args);
 	if (error)
 		goto out;
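
Note the asymmetric contract of the new xfs_attr_set_args(): on the shortform path it commits and clears args->trans itself, so a caller that supplies its own transaction must re-check the handle before committing. A caller sketch, assuming a reserved and ijoined transaction as in xfs_attr_set():

	error = xfs_attr_set_args(&args);
	if (error)
		goto out_trans_cancel;
	if (!args.trans)		/* shortform path already committed */
		goto out_unlock;
	error = xfs_trans_commit(args.trans);
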
 
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
similarity index 97%
rename from fs/xfs/xfs_attr.h
rename to fs/xfs/libxfs/xfs_attr.h
index 033ff8c..cc04ee0 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -140,7 +140,9 @@ int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
 		 unsigned char *value, int *valuelenp, int flags);
 int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
 		 unsigned char *value, int valuelen, int flags);
+int xfs_attr_set_args(struct xfs_da_args *args);
 int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_remove_args(struct xfs_da_args *args);
 int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
 		  int flags, struct attrlist_cursor_kern *cursor);
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 3a496ff..06a7da8 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1019,6 +1019,34 @@ xfs_bmap_add_attrfork_local(
 	return -EFSCORRUPTED;
 }
 
+/* Set an inode's attr fork offset based on its format */
+int
+xfs_bmap_set_attrforkoff(
+	struct xfs_inode	*ip,
+	int			size,
+	int			*version)
+{
+	switch (ip->i_d.di_format) {
+	case XFS_DINODE_FMT_DEV:
+		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+		break;
+	case XFS_DINODE_FMT_LOCAL:
+	case XFS_DINODE_FMT_EXTENTS:
+	case XFS_DINODE_FMT_BTREE:
+		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
+		if (!ip->i_d.di_forkoff)
+			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
+		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
+			*version = 2;
+		break;
+	default:
+		ASSERT(0);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /*
  * Convert inode from non-attributed to attributed.
  * Must not be in a transaction, ip must not be locked.
@@ -1070,26 +1098,9 @@ xfs_bmap_add_attrfork(
 
 	xfs_trans_ijoin(tp, ip, 0);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-	switch (ip->i_d.di_format) {
-	case XFS_DINODE_FMT_DEV:
-		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
-		break;
-	case XFS_DINODE_FMT_LOCAL:
-	case XFS_DINODE_FMT_EXTENTS:
-	case XFS_DINODE_FMT_BTREE:
-		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
-		if (!ip->i_d.di_forkoff)
-			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
-		else if (mp->m_flags & XFS_MOUNT_ATTR2)
-			version = 2;
-		break;
-	default:
-		ASSERT(0);
-		error = -EINVAL;
+	error = xfs_bmap_set_attrforkoff(ip, size, &version);
+	if (error)
 		goto trans_cancel;
-	}
-
 	ASSERT(ip->i_afp == NULL);
 	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
 	ip->i_afp->if_flags = XFS_IFEXTENTS;
@@ -1178,7 +1189,10 @@ xfs_iread_extents(
 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
 	 */
 	level = be16_to_cpu(block->bb_level);
-	ASSERT(level > 0);
+	if (unlikely(level == 0)) {
+		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+		return -EFSCORRUPTED;
+	}
 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
 	bno = be64_to_cpu(*pp);
 
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index b6e9b63..488dc88 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -183,6 +183,7 @@ void	xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
 		xfs_filblks_t len);
 void	xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
 int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+int	xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
 void	xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void	__xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
 		xfs_filblks_t len, struct xfs_owner_info *oinfo,
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index e792b16..c52beee 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -266,13 +266,15 @@ xfs_defer_trans_roll(
 
 	trace_xfs_defer_trans_roll(tp, _RET_IP_);
 
-	/* Roll the transaction. */
+	/*
+	 * Roll the transaction.  Rolling always gives a new transaction (even
+	 * if committing the old one fails!) to hand back to the caller, so we
+	 * join the held resources to the new transaction so that we always
+	 * return with the held resources joined to @tpp, no matter what
+	 * happened.
+	 */
 	error = xfs_trans_roll(tpp);
 	tp = *tpp;
-	if (error) {
-		trace_xfs_defer_trans_roll_error(tp, error);
-		return error;
-	}
 
 	/* Rejoin the joined inodes. */
 	for (i = 0; i < ipcount; i++)
@@ -284,6 +286,8 @@ xfs_defer_trans_roll(
 		xfs_trans_bhold(tp, bplist[i]);
 	}
 
+	if (error)
+		trace_xfs_defer_trans_roll_error(tp, error);
 	return error;
 }
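
The defer-roll fix backs the bhold/roll/rejoin pattern used by xfs_attr_set_args() above: held buffers must be rejoined to the new transaction even when the roll fails, or the caller's cleanup would release them against the wrong transaction. The caller-side shape:

	xfs_trans_bhold(tp, bp);		/* keep bp across the roll */
	error = xfs_defer_finish(&tp);		/* may roll tp repeatedly  */
	xfs_trans_bhold_release(tp, bp);	/* bp rejoined to new tp   */
	if (error) {
		xfs_trans_brelse(tp, bp);
		return error;
	}
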
 
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 87e6dd53..a1af984 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -277,7 +277,8 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 
 /*
  * Ensure that the given in-core dquot has a buffer on disk backing it, and
- * return the buffer. This is called when the bmapi finds a hole.
+ * return the buffer locked and held. This is called when the bmapi finds a
+ * hole.
  */
 STATIC int
 xfs_dquot_disk_alloc(
@@ -355,13 +356,14 @@ xfs_dquot_disk_alloc(
 	 * If everything succeeds, the caller of this function is returned a
 	 * buffer that is locked and held to the transaction.  The caller
 	 * is responsible for unlocking any buffer passed back, either
-	 * manually or by committing the transaction.
+	 * manually or by committing the transaction.  On error, the buffer is
+	 * released and not passed back.
 	 */
 	xfs_trans_bhold(tp, bp);
 	error = xfs_defer_finish(tpp);
-	tp = *tpp;
 	if (error) {
-		xfs_buf_relse(bp);
+		xfs_trans_bhold_release(*tpp, bp);
+		xfs_trans_brelse(*tpp, bp);
 		return error;
 	}
 	*bpp = bp;
@@ -521,7 +523,6 @@ xfs_qm_dqread_alloc(
 	struct xfs_buf		**bpp)
 {
 	struct xfs_trans	*tp;
-	struct xfs_buf		*bp;
 	int			error;
 
 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
@@ -529,7 +530,7 @@ xfs_qm_dqread_alloc(
 	if (error)
 		goto err;
 
-	error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
+	error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
 	if (error)
 		goto err_cancel;
 
@@ -539,10 +540,10 @@ xfs_qm_dqread_alloc(
 		 * Buffer was held to the transaction, so we have to unlock it
 		 * manually here because we're not passing it back.
 		 */
-		xfs_buf_relse(bp);
+		xfs_buf_relse(*bpp);
+		*bpp = NULL;
 		goto err;
 	}
-	*bpp = bp;
 	return 0;
 
 err_cancel:
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 74047bd..e427ad0 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -803,6 +803,7 @@ xfs_setattr_nonsize(
 
 out_cancel:
 	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_dqrele:
 	xfs_qm_dqrele(udqp);
 	xfs_qm_dqrele(gdqp);
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7..e9f20b8 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>
 
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-	int order;
-
-	size--;
-	size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-	order = fls(size);
-#else
-	order = fls64(size);
-#endif
-	return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)						\
-(								\
-	__builtin_constant_p(n) ? (				\
-		((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :	\
-		(((n) < (1UL << PAGE_SHIFT)) ? 0 :		\
-		 ilog2((n) - 1) - PAGE_SHIFT + 1)		\
-	) :							\
-	__get_order(n)						\
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	if (__builtin_constant_p(size)) {
+		if (!size)
+			return BITS_PER_LONG - PAGE_SHIFT;
+
+		if (size < (1UL << PAGE_SHIFT))
+			return 0;
+
+		return ilog2((size) - 1) - PAGE_SHIFT + 1;
+	}
+
+	size--;
+	size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+	return fls(size);
+#else
+	return fls64(size);
+#endif
+}
 
 #endif	/* __ASSEMBLY__ */
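
get_order(n) returns the smallest order k such that 2^k pages can hold n bytes. Worked values, assuming PAGE_SHIFT == 12 (4 KiB pages):

	/* get_order(1)    == 0   (one page suffices)
	 * get_order(4096) == 0
	 * get_order(4097) == 1   (needs two pages)
	 * get_order(8192) == 1
	 * get_order(8193) == 2
	 * get_order(0) is undefined, per the comment above
	 */
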
 
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fbf5cfc9b3..fd965ff 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -386,6 +386,7 @@
 	INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+	INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E9A, info)  /* SRV GT2 */
 
 /* CFL H */
diff --git a/include/dt-bindings/clock/qcom,gpucc-bengal.h b/include/dt-bindings/clock/qcom,gpucc-bengal.h
index fa92b18..181005b 100644
--- a/include/dt-bindings/clock/qcom,gpucc-bengal.h
+++ b/include/dt-bindings/clock/qcom,gpucc-bengal.h
@@ -14,16 +14,15 @@
 #define GPU_CC_AHB_CLK						4
 #define GPU_CC_CRC_AHB_CLK					5
 #define GPU_CC_CX_GFX3D_CLK					6
-#define GPU_CC_CX_GFX3D_SLV_CLK					7
-#define GPU_CC_CX_GMU_CLK					8
-#define GPU_CC_CX_SNOC_DVM_CLK					9
-#define GPU_CC_CXO_AON_CLK					10
-#define GPU_CC_CXO_CLK						11
-#define GPU_CC_GMU_CLK_SRC					12
-#define GPU_CC_GX_CXO_CLK					13
-#define GPU_CC_GX_GFX3D_CLK					14
-#define GPU_CC_GX_GFX3D_CLK_SRC					15
-#define GPU_CC_SLEEP_CLK					16
-#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK				17
+#define GPU_CC_CX_GMU_CLK					7
+#define GPU_CC_CX_SNOC_DVM_CLK					8
+#define GPU_CC_CXO_AON_CLK					9
+#define GPU_CC_CXO_CLK						10
+#define GPU_CC_GMU_CLK_SRC					11
+#define GPU_CC_GX_CXO_CLK					12
+#define GPU_CC_GX_GFX3D_CLK					13
+#define GPU_CC_GX_GFX3D_CLK_SRC					14
+#define GPU_CC_SLEEP_CLK					15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK				16
 
 #endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 90ac450..561fefc 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -361,6 +361,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 7e9c991..43ed9e7 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -173,6 +173,8 @@ struct ccp_aes_engine {
 	enum ccp_aes_mode mode;
 	enum ccp_aes_action action;
 
+	u32 authsize;
+
 	struct scatterlist *key;
 	u32 key_len;		/* In bytes */
 
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 33fd4e9..27bbf4e 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -13,6 +13,8 @@
 #include "linux/msm_gsi.h"
 
 #define IPA_APPS_MAX_BW_IN_MBPS 700
+#define IPA_BW_THRESHOLD_MAX 3
+
 /**
  * enum ipa_transport_type
  * transport type: either GSI or SPS
@@ -501,10 +503,13 @@ typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
  *			use ipa_get_wdi_sap_stats structure
  * @IPA_SET_WIFI_QUOTA: set quota limit on STA -
  *			use ipa_set_wifi_quota structure
+ * @IPA_INFORM_WLAN_BW: inform WLAN BW threshold reached -
+ *			use ipa_inform_wlan_bw structure
  */
 enum ipa_wdi_meter_evt_type {
 	IPA_GET_WDI_SAP_STATS,
 	IPA_SET_WIFI_QUOTA,
+	IPA_INFORM_WLAN_BW,
 };
 
 struct ipa_get_wdi_sap_stats {
@@ -540,6 +545,19 @@ struct ipa_set_wifi_quota {
 	uint8_t set_valid;
 };
 
+/**
+ * struct ipa_inform_wlan_bw - structure used for
+ *                                   IPA_INFORM_WLAN_BW.
+ *
+ * @index:       indicates which bw index was hit
+ * @throughput:  current throughput usage
+ *
+ */
+struct ipa_inform_wlan_bw {
+	uint8_t  index;
+	uint64_t throughput;
+};
+
 typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt,
 		       void *data);
 
@@ -1154,6 +1172,32 @@ struct ipa_wdi_buffer_info {
 };
 
 /**
+ * struct  ipa_wdi_bw_info - bandwidth monitoring info from WLAN
+ * @threshold: throughput thresholds to be monitored
+ * @num: number of threshold entries
+ * @stop: true to stop monitoring
+ */
+struct ipa_wdi_bw_info {
+	uint64_t threshold[IPA_BW_THRESHOLD_MAX];
+	int num;
+	bool stop;
+};
+
+/**
+ * struct  ipa_wdi_tx_info - sw tx info from WLAN
+ * @sta_tx: sw tx stats on sta interface
+ * @ap_tx: sw tx stats on ap interface
+ */
+struct ipa_wdi_tx_info {
+	uint64_t sta_tx;
+	uint64_t ap_tx;
+};
+
+/**
  * struct ipa_gsi_ep_config - IPA GSI endpoint configurations
  *
  * @ipa_ep_num: IPA EP pipe number
@@ -1421,6 +1465,8 @@ int ipa_disable_wdi_pipe(u32 clnt_hdl);
 int ipa_resume_wdi_pipe(u32 clnt_hdl);
 int ipa_suspend_wdi_pipe(u32 clnt_hdl);
 int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info);
+int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info);
 u16 ipa_get_smem_restr_bytes(void);
 int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
 		uint64_t num_bytes);
@@ -2357,6 +2403,16 @@ static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
 	return -EPERM;
 }
 
+static inline int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+	return -EPERM;
+}
+
 static inline int ipa_get_ep_mapping(enum ipa_client_type client)
 {
 	return -EPERM;
diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h
index fed0082..c1497c5 100644
--- a/include/linux/ipa_wdi3.h
+++ b/include/linux/ipa_wdi3.h
@@ -347,6 +347,29 @@ int ipa_wdi_release_smmu_mapping(u32 num_buffers,
  */
 int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats);
 
+
+/**
+ * ipa_wdi_bw_monitor() - set wdi BW monitoring
+ * @info:	[inout] info blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info);
+
+/**
+ * ipa_wdi_sw_stats() - report sw tx stats to IPA
+ * @info:	[inout] info blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info);
+
 #else /* (CONFIG_IPA || CONFIG_IPA3) */
 
 static inline int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
@@ -415,6 +438,16 @@ static inline int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
 	return -EPERM;
 }
 
+static inline int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
+{
+	return -EPERM;
+}
+
 #endif /* CONFIG_IPA3 */
 
 #endif /* _IPA_WDI3_H_ */
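
A usage sketch for the new monitoring entry point; the threshold values are placeholders, and their units are whatever the IPA driver expects (the header does not specify them):

	struct ipa_wdi_bw_info info = {
		.threshold = { 100, 200, 400 },
		.num = 3,
		.stop = false,
	};

	if (ipa_wdi_bw_monitor(&info))		/* arm the thresholds */
		return;
	/* later, on teardown */
	info.stop = true;
	ipa_wdi_bw_monitor(&info);		/* stop monitoring */
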
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 30efb36..d42a36e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -818,6 +818,7 @@ void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 1c4b37c..9c1c709 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -38,7 +38,7 @@ enum MHI_CB {
 };
 
 /**
- * enum MHI_DEBUG_LEVL - various debugging level
+ * enum MHI_DEBUG_LEVEL - various debugging levels
  */
 enum MHI_DEBUG_LEVEL {
 	MHI_MSG_LVL_VERBOSE,
@@ -46,6 +46,7 @@ enum MHI_DEBUG_LEVEL {
 	MHI_MSG_LVL_ERROR,
 	MHI_MSG_LVL_CRITICAL,
 	MHI_MSG_LVL_MASK_ALL,
+	MHI_MSG_LVL_MAX,
 };
 
 /**
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3ab6949..d76803d 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -34,6 +34,8 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
 #ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_user_hint;
+extern const int sched_user_hint_max;
 extern unsigned int sysctl_sched_cpu_high_irqload;
 extern unsigned int sysctl_sched_boost;
 extern unsigned int sysctl_sched_group_upmigrate_pct;
@@ -49,6 +51,10 @@ extern int
 walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp,
 			 loff_t *ppos);
+extern int
+walt_proc_user_hint_handler(struct ctl_table *table, int write,
+			 void __user *buffer, size_t *lenp,
+			 loff_t *ppos);
 
 #endif
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3424613..6021b41 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -342,6 +342,60 @@ struct ieee80211_sband_iftype_data {
 };
 
 /**
+ * enum ieee80211_edmg_bw_config - allowed channel bandwidth configurations
+ *
+ * @IEEE80211_EDMG_BW_CONFIG_4: 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_5: 2.16GHz and 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_6: 2.16GHz, 4.32GHz and 6.48GHz
+ * @IEEE80211_EDMG_BW_CONFIG_7: 2.16GHz, 4.32GHz, 6.48GHz and 8.64GHz
+ * @IEEE80211_EDMG_BW_CONFIG_8: 2.16GHz and 2.16GHz + 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_9: 2.16GHz, 4.32GHz and 2.16GHz + 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_10: 2.16GHz, 4.32GHz, 6.48GHz and 2.16GHz+2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_11: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz and
+ *	2.16GHz+2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_12: 2.16GHz, 2.16GHz + 2.16GHz and
+ *	4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_13: 2.16GHz, 4.32GHz, 2.16GHz + 2.16GHz and
+ *	4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_14: 2.16GHz, 4.32GHz, 6.48GHz, 2.16GHz + 2.16GHz
+ *	and 4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_15: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz,
+ *	2.16GHz + 2.16GHz and 4.32GHz + 4.32GHz
+ */
+enum ieee80211_edmg_bw_config {
+	IEEE80211_EDMG_BW_CONFIG_4	= 4,
+	IEEE80211_EDMG_BW_CONFIG_5	= 5,
+	IEEE80211_EDMG_BW_CONFIG_6	= 6,
+	IEEE80211_EDMG_BW_CONFIG_7	= 7,
+	IEEE80211_EDMG_BW_CONFIG_8	= 8,
+	IEEE80211_EDMG_BW_CONFIG_9	= 9,
+	IEEE80211_EDMG_BW_CONFIG_10	= 10,
+	IEEE80211_EDMG_BW_CONFIG_11	= 11,
+	IEEE80211_EDMG_BW_CONFIG_12	= 12,
+	IEEE80211_EDMG_BW_CONFIG_13	= 13,
+	IEEE80211_EDMG_BW_CONFIG_14	= 14,
+	IEEE80211_EDMG_BW_CONFIG_15	= 15,
+};
+
+/**
+ * struct ieee80211_edmg - EDMG configuration
+ *
+ * This structure describes the essential parameters needed
+ * to describe an 802.11ay EDMG configuration
+ *
+ * @channels: bitmap that indicates the 2.16 GHz channel(s)
+ *	that are allowed to be used for transmissions.
+ *	Bit 0 indicates channel 1, bit 1 indicates channel 2, etc.
+ *	Set to 0 to indicate that EDMG is not supported.
+ * @bw_config: Channel BW Configuration subfield encodes
+ *	the allowed channel bandwidth configurations
+ */
+struct ieee80211_edmg {
+	u8 channels;
+	enum ieee80211_edmg_bw_config bw_config;
+};
+
+/**
  * struct ieee80211_supported_band - frequency band definition
  *
  * This structure describes a frequency band a wiphy
@@ -357,6 +411,7 @@ struct ieee80211_sband_iftype_data {
  * @n_bitrates: Number of bitrates in @bitrates
  * @ht_cap: HT capabilities in this band
  * @vht_cap: VHT capabilities in this band
+ * @edmg_cap: EDMG capabilities in this band
  * @n_iftype_data: number of iftype data entries
  * @iftype_data: interface type data entries.  Note that the bits in
  *	@types_mask inside this structure cannot overlap (i.e. only
@@ -371,6 +426,7 @@ struct ieee80211_supported_band {
 	int n_bitrates;
 	struct ieee80211_sta_ht_cap ht_cap;
 	struct ieee80211_sta_vht_cap vht_cap;
+	struct ieee80211_edmg edmg_cap;
 	u16 n_iftype_data;
 	const struct ieee80211_sband_iftype_data *iftype_data;
 };
@@ -522,12 +578,17 @@ struct key_params {
  * @center_freq1: center frequency of first segment
  * @center_freq2: center frequency of second segment
  *	(only with 80+80 MHz)
+ * @edmg: define the EDMG channels configuration.
+ *	If edmg is requested (i.e. the .channels member is non-zero),
+ *	chan will define the primary channel and all other
+ *	parameters are ignored.
  */
 struct cfg80211_chan_def {
 	struct ieee80211_channel *chan;
 	enum nl80211_chan_width width;
 	u32 center_freq1;
 	u32 center_freq2;
+	struct ieee80211_edmg edmg;
 };
 
 /**
@@ -586,6 +647,19 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
 }
 
 /**
+ * cfg80211_chandef_is_edmg - check if chandef represents an EDMG channel
+ *
+ * @chandef: the channel definition
+ *
+ * Return: %true if EDMG defined, %false otherwise.
+ */
+static inline bool
+cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef)
+{
+	return chandef->edmg.channels || chandef->edmg.bw_config;
+}
+
+/**
  * cfg80211_chandef_compatible - check if two channel definitions are compatible
  * @chandef1: first channel definition
  * @chandef2: second channel definition
@@ -1124,15 +1198,17 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
  * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
  * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
  * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
- * @RATE_INFO_FLAGS_60G: 60GHz MCS
+ * @RATE_INFO_FLAGS_DMG: 60GHz MCS
  * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
+ * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
  */
 enum rate_info_flags {
 	RATE_INFO_FLAGS_MCS			= BIT(0),
 	RATE_INFO_FLAGS_VHT_MCS			= BIT(1),
 	RATE_INFO_FLAGS_SHORT_GI		= BIT(2),
-	RATE_INFO_FLAGS_60G			= BIT(3),
+	RATE_INFO_FLAGS_DMG			= BIT(3),
 	RATE_INFO_FLAGS_HE_MCS			= BIT(4),
+	RATE_INFO_FLAGS_EDMG			= BIT(5),
 };
 
 /**
@@ -1172,6 +1248,7 @@ enum rate_info_bw {
  * @he_dcm: HE DCM value
  * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
  *	only valid if bw is %RATE_INFO_BW_HE_RU)
+ * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4)
  */
 struct rate_info {
 	u8 flags;
@@ -1182,6 +1259,7 @@ struct rate_info {
 	u8 he_gi;
 	u8 he_dcm;
 	u8 he_ru_alloc;
+	u8 n_bonded_ch;
 };
 
 /**
@@ -2347,6 +2425,9 @@ struct cfg80211_bss_selection {
  * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
  * @want_1x: indicates user-space supports and wants to use 802.1X driver
  *	offload of 4-way handshake.
+ * @edmg: define the EDMG channels.
+ *	This may specify multiple channels and bonding options for the driver
+ *	to choose from, based on BSS configuration.
  */
 struct cfg80211_connect_params {
 	struct ieee80211_channel *channel;
@@ -2380,6 +2461,7 @@ struct cfg80211_connect_params {
 	const u8 *fils_erp_rrk;
 	size_t fils_erp_rrk_len;
 	bool want_1x;
+	struct ieee80211_edmg edmg;
 };
 
 /**
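
Putting the new pieces together: an EDMG channel definition sets .chan to the primary 2.16 GHz channel and describes the bonding options in .edmg, and the new helper keys off those fields. A sketch, where primary_chan is a hypothetical struct ieee80211_channel pointer:

	struct cfg80211_chan_def def = {
		.chan = primary_chan,
		.edmg = {
			.channels = BIT(0) | BIT(1),	/* channels 1 and 2 */
			.bw_config = IEEE80211_EDMG_BW_CONFIG_5,
		},
	};

	if (cfg80211_chandef_is_edmg(&def))
		pr_debug("EDMG channel configured\n");
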
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 892096b..fa4690339 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -166,6 +166,11 @@ extern int cnss_self_recovery(struct device *dev,
 			      enum cnss_recovery_reason reason);
 extern int cnss_force_fw_assert(struct device *dev);
 extern int cnss_force_collect_rddm(struct device *dev);
+extern int cnss_qmi_send_get(struct device *dev);
+extern int cnss_qmi_send_put(struct device *dev);
+extern int cnss_qmi_send(struct device *dev, int type, void *cmd,
+			 int cmd_len, void *cb_ctx,
+			 int (*cb)(void *ctx, void *event, int event_len));
 extern void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size);
 extern int cnss_get_fw_files_for_target(struct device *dev,
 					struct cnss_fw_files *pfw_files,
diff --git a/include/soc/qcom/rmnet_ctl.h b/include/soc/qcom/rmnet_ctl.h
new file mode 100644
index 0000000..0080560
--- /dev/null
+++ b/include/soc/qcom/rmnet_ctl.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL header
+ *
+ */
+
+#ifndef _RMNET_CTL_H_
+#define _RMNET_CTL_H_
+
+#include <linux/skbuff.h>
+
+struct rmnet_ctl_client_hooks {
+	void (*ctl_dl_client_hook)(struct sk_buff *skb);
+};
+
+#ifdef CONFIG_RMNET_CTL
+
+void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook);
+int rmnet_ctl_unregister_client(void *handle);
+int rmnet_ctl_send_client(void *handle, struct sk_buff *skb);
+
+#else
+
+static inline void *rmnet_ctl_register_client(
+			struct rmnet_ctl_client_hooks *hook)
+{
+	return NULL;
+}
+
+static inline int rmnet_ctl_unregister_client(void *handle)
+{
+	return -EINVAL;
+}
+
+static inline int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
+{
+	return -EINVAL;
+}
+
+#endif /* CONFIG_RMNET_CTL */
+
+#endif /* _RMNET_CTL_H_ */
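
A minimal client sketch against the new header. The stub variants mean a NULL handle covers both registration failure and CONFIG_RMNET_CTL=n; whether the hook owns the skb is an assumption, as the header does not say:

	#include <soc/qcom/rmnet_ctl.h>

	static void *my_hdl;

	static void my_dl_hook(struct sk_buff *skb)
	{
		/* process, then free the downlink control frame */
		consume_skb(skb);
	}

	static struct rmnet_ctl_client_hooks my_hooks = {
		.ctl_dl_client_hook = my_dl_hook,
	};

	static int my_client_init(void)
	{
		my_hdl = rmnet_ctl_register_client(&my_hooks);
		return my_hdl ? 0 : -ENODEV;
	}
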
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
index 9096b10..ffcef3f 100644
--- a/include/soc/qcom/rmnet_qmi.h
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -24,6 +24,7 @@ void rmnet_set_powersave_format(void *port);
 void rmnet_clear_powersave_format(void *port);
 void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
 int rmnet_get_powersave_notif(void *port);
+struct net_device *rmnet_get_real_dev(void *port);
 #else
 static inline void *rmnet_get_qmi_pt(void *port)
 {
@@ -76,5 +77,9 @@ static inline int rmnet_get_powersave_notif(void *port)
 	return 0;
 }
 
+static inline struct net_device *rmnet_get_real_dev(void *port)
+{
+	return NULL;
+}
 #endif /* CONFIG_QCOM_QMI_RMNET */
 #endif /*_RMNET_QMI_H*/
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 02713d1..82845d9 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -41,6 +41,18 @@ enum vmid {
 #define PERM_WRITE                      0x2
 #define PERM_EXEC			0x1
 
+struct dest_vm_and_perm_info {
+	u32 vm;
+	u32 perm;
+	u64 ctx;
+	u32 ctx_size;
+};
+
+struct mem_prot_info {
+	phys_addr_t addr;
+	u64 size;
+};
+
 #ifdef CONFIG_QCOM_SECURE_BUFFER
 int msm_secure_table(struct sg_table *table);
 int msm_unsecure_table(struct sg_table *table);
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index b52d4a0..78a2291 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -177,10 +177,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
 	if (snd_BUG_ON(!stream))
 		return;
 
-	if (stream->direction == SND_COMPRESS_PLAYBACK)
-		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-	else
-		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 
 	wake_up(&stream->runtime->sleep);
 }
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
index 375a156..fb092bb 100644
--- a/include/trace/events/dfc.h
+++ b/include/trace/events/dfc.h
@@ -236,6 +236,29 @@ TRACE_EVENT(dfc_tx_link_status_ind,
 		__entry->mid, __entry->bid)
 );
 
+TRACE_EVENT(dfc_qmap,
+
+	TP_PROTO(const void *data, size_t len, bool in),
+
+	TP_ARGS(data, len, in),
+
+	TP_STRUCT__entry(
+		__field(bool, in)
+		__field(size_t, len)
+		__dynamic_array(u8, data, len)
+	),
+
+	TP_fast_assign(
+		__entry->in = in;
+		__entry->len = len;
+		memcpy(__get_dynamic_array(data), data, len);
+	),
+
+	TP_printk("%s [%s]",
+		__entry->in ? "<--" : "-->",
+		__print_hex(__get_dynamic_array(data), __entry->len))
+);
+
 #endif /* _TRACE_DFC_H */
 
 /* This part must be outside protection */
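
The new event snapshots raw QMAP control bytes with a direction marker; an illustrative pair of call sites (the skb variable is assumed):

	trace_dfc_qmap(skb->data, skb->len, true);	/* received,    logged as "<--" */
	trace_dfc_qmap(skb->data, skb->len, false);	/* transmitted, logged as "-->" */
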
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 147546e..815dcfa 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -500,10 +500,10 @@ rxrpc_tx_points;
 #define E_(a, b)	{ a, b }
 
 TRACE_EVENT(rxrpc_local,
-	    TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
+	    TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
 		     int usage, const void *where),
 
-	    TP_ARGS(local, op, usage, where),
+	    TP_ARGS(local_debug_id, op, usage, where),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int,	local		)
@@ -513,7 +513,7 @@ TRACE_EVENT(rxrpc_local,
 			     ),
 
 	    TP_fast_assign(
-		    __entry->local = local->debug_id;
+		    __entry->local = local_debug_id;
 		    __entry->op = op;
 		    __entry->usage = usage;
 		    __entry->where = where;
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 1cf2bf9..13cd0b3 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -491,9 +491,10 @@ TRACE_EVENT(sched_load_to_gov,
 
 	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
 		int freq_aggr, u64 load, int policy,
-		int big_task_rotation),
+		int big_task_rotation,
+		unsigned int user_hint),
 	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy,
-		big_task_rotation),
+		big_task_rotation, user_hint),
 
 	TP_STRUCT__entry(
 		__field(int,	cpu)
@@ -509,6 +510,7 @@ TRACE_EVENT(sched_load_to_gov,
 		__field(u64,	pl)
 		__field(u64,    load)
 		__field(int,    big_task_rotation)
+		__field(unsigned int, user_hint)
 	),
 
 	TP_fast_assign(
@@ -526,13 +528,14 @@ TRACE_EVENT(sched_load_to_gov,
 					rq->walt_stats.pred_demands_sum_scaled;
 		__entry->load		= load;
 		__entry->big_task_rotation = big_task_rotation;
+		__entry->user_hint = user_hint;
 	),
 
-	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
+	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d user_hint=%u",
 		__entry->cpu, __entry->policy, __entry->ed_task_pid,
 		__entry->aggr_grp_load, __entry->freq_aggr,
 		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
 		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
-		__entry->big_task_rotation)
+		__entry->big_task_rotation, __entry->user_hint)
 );
 #endif
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index a1953ab..a137403 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -126,6 +126,7 @@
 #define IPA_IOCTL_FNR_COUNTER_ALLOC             74
 #define IPA_IOCTL_FNR_COUNTER_DEALLOC           75
 #define IPA_IOCTL_FNR_COUNTER_QUERY             76
+#define IPA_IOCTL_SET_FNR_COUNTER_INFO          77
 
 /**
  * max size of the header to be inserted
@@ -2523,6 +2524,24 @@ struct ipa_odl_modem_config {
 	__u8 config_status;
 };
 
+struct ipa_ioc_fnr_index_info {
+	uint8_t hw_counter_offset;
+	uint8_t sw_counter_offset;
+};
+
+enum ipacm_hw_index_counter_type {
+	UL_HW = 0,
+	DL_HW,
+	DL_ALL,
+	UL_ALL,
+};
+
+enum ipacm_hw_index_counter_virtual_type {
+	UL_HW_CACHE = 0,
+	DL_HW_CACHE,
+	UL_WLAN_TX,
+	DL_WLAN_TX
+};
 
 /**
  *   actual IOCTLs supported by IPA driver
@@ -2773,6 +2792,10 @@ struct ipa_odl_modem_config {
 				IPA_IOCTL_FNR_COUNTER_QUERY, \
 				struct ipa_ioc_flt_rt_query)
 
+#define IPA_IOC_SET_FNR_COUNTER_INFO _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_FNR_COUNTER_INFO, \
+				struct ipa_ioc_fnr_index_info)
+
 /*
  * unique magic number of the Tethering bridge ioctls
  */
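
From userspace, the new command pairs IPA_IOC_SET_FNR_COUNTER_INFO with struct ipa_ioc_fnr_index_info; a sketch, where the /dev/ipa node path and the zero offsets are assumptions:

	#include <errno.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/msm_ipa.h>

	static int set_fnr_counter_info(void)
	{
		struct ipa_ioc_fnr_index_info info = {
			.hw_counter_offset = 0,
			.sw_counter_offset = 0,
		};
		int fd = open("/dev/ipa", O_RDWR);
		int ret;

		if (fd < 0)
			return -errno;

		ret = ioctl(fd, IPA_IOC_SET_FNR_COUNTER_INFO, &info);
		if (ret < 0)
			ret = -errno;
		close(fd);
		return ret;
	}
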
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 16bf3a5..2f938db 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -52,6 +52,11 @@
 #define NL80211_MULTICAST_GROUP_NAN		"nan"
 #define NL80211_MULTICAST_GROUP_TESTMODE	"testmode"
 
+#define NL80211_EDMG_BW_CONFIG_MIN	4
+#define NL80211_EDMG_BW_CONFIG_MAX	15
+#define NL80211_EDMG_CHANNELS_MIN	1
+#define NL80211_EDMG_CHANNELS_MAX	0x3c /* 0b00111100 */
+
 /**
  * DOC: Station handling
  *
@@ -2288,6 +2293,52 @@ enum nl80211_commands {
  *	association request when used with NL80211_CMD_NEW_STATION). Can be set
  *	only if %NL80211_STA_FLAG_WME is set.
  *
+ * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include
+ *      in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing
+ *      measurement (FTM) responder functionality; the contained parameters
+ *      are described in &enum nl80211_ftm_responder_attr
+ *
+ * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder
+ *      statistics, see &enum nl80211_ftm_responder_stats.
+ *
+ * @NL80211_ATTR_TIMEOUT: Timeout for the given operation in milliseconds (u32),
+ *      if the attribute is not given, no timeout is requested. Note that 0 is
+ *      an invalid value.
+ *
+ * @NL80211_ATTR_PEER_MEASUREMENTS: peer measurements request (and result)
+ *      data, uses nested attributes specified in
+ *      &enum nl80211_peer_measurement_attrs.
+ *      This is also used for capability advertisement in the wiphy information,
+ *      with the appropriate sub-attributes.
+ *
+ * @NL80211_ATTR_AIRTIME_WEIGHT: Station's weight when scheduled by the airtime
+ *      scheduler.
+ *
+ * @NL80211_ATTR_STA_TX_POWER_SETTING: Transmit power setting type (u8) for
+ *      station associated with the AP. See &enum nl80211_tx_power_setting for
+ *      possible values.
+ * @NL80211_ATTR_STA_TX_POWER: Transmit power level (s16) in dBm units. This
+ *      allows setting the Tx power for a station. If this attribute is not
+ *      included, the default per-interface tx power setting applies. The
+ *      driver should pick the lowest tx power, either the per-interface or
+ *      the per-station one.
+ *
+ * @NL80211_ATTR_SAE_PASSWORD: attribute for passing SAE password material. It
+ *      is used with %NL80211_CMD_CONNECT to provide password for offloading
+ *      SAE authentication for WPA3-Personal networks.
+ *
+ * @NL80211_ATTR_TWT_RESPONDER: Enable target wait time responder support.
+ *
+ * @NL80211_ATTR_HE_OBSS_PD: nested attribute for OBSS Packet Detection
+ *      functionality.
+ *
+ * @NL80211_ATTR_WIPHY_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz
+ *      channel(s) that are allowed to be used for EDMG transmissions.
+ *      Defined by IEEE P802.11ay/D4.0 section 9.4.2.251. (u8 attribute)
+ * @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes
+ *      the allowed channel bandwidth configurations. (u8 attribute)
+ *      Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2729,6 +2780,27 @@ enum nl80211_attrs {
 
 	NL80211_ATTR_HE_CAPABILITY,
 
+	NL80211_ATTR_FTM_RESPONDER,
+
+	NL80211_ATTR_FTM_RESPONDER_STATS,
+
+	NL80211_ATTR_TIMEOUT,
+
+	NL80211_ATTR_PEER_MEASUREMENTS,
+
+	NL80211_ATTR_AIRTIME_WEIGHT,
+	NL80211_ATTR_STA_TX_POWER_SETTING,
+	NL80211_ATTR_STA_TX_POWER,
+
+	NL80211_ATTR_SAE_PASSWORD,
+
+	NL80211_ATTR_TWT_RESPONDER,
+
+	NL80211_ATTR_HE_OBSS_PD,
+
+	NL80211_ATTR_WIPHY_EDMG_CHANNELS,
+	NL80211_ATTR_WIPHY_EDMG_BW_CONFIG,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -2779,7 +2851,7 @@ enum nl80211_attrs {
 #define NL80211_HT_CAPABILITY_LEN		26
 #define NL80211_VHT_CAPABILITY_LEN		12
 #define NL80211_HE_MIN_CAPABILITY_LEN           16
-#define NL80211_HE_MAX_CAPABILITY_LEN           51
+#define NL80211_HE_MAX_CAPABILITY_LEN           54
 #define NL80211_MAX_NR_CIPHER_SUITES		5
 #define NL80211_MAX_NR_AKM_SUITES		2
 
@@ -3320,6 +3392,12 @@ enum nl80211_band_iftype_attr {
  * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
  * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using
  *	attributes from &enum nl80211_band_iftype_attr
+ * @NL80211_BAND_ATTR_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz
+ *      channel(s) that are allowed to be used for EDMG transmissions.
+ *      Defined by IEEE P802.11ay/D4.0 section 9.4.2.251.
+ * @NL80211_BAND_ATTR_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes
+ *      the allowed channel bandwidth configurations.
+ *      Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
  * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
  * @__NL80211_BAND_ATTR_AFTER_LAST: internal use
  */
@@ -3337,6 +3415,9 @@ enum nl80211_band_attr {
 	NL80211_BAND_ATTR_VHT_CAPA,
 	NL80211_BAND_ATTR_IFTYPE_DATA,
 
+	NL80211_BAND_ATTR_EDMG_CHANNELS,
+	NL80211_BAND_ATTR_EDMG_BW_CONFIG,
+
 	/* keep last */
 	__NL80211_BAND_ATTR_AFTER_LAST,
 	NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
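
The four new defines bound the EDMG attribute payloads; a hypothetical helper spelling out the implied range checks (the in-tree nl80211 policy may express this differently):

	static bool edmg_attrs_in_range(u8 channels, u8 bw_config)
	{
		if (channels < NL80211_EDMG_CHANNELS_MIN ||
		    channels > NL80211_EDMG_CHANNELS_MAX)
			return false;

		return bw_config >= NL80211_EDMG_BW_CONFIG_MIN &&
		       bw_config <= NL80211_EDMG_BW_CONFIG_MAX;
	}
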
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 888d93c..6d7d708 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11292,7 +11292,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_unlock;
 	}
 
-	perf_install_in_context(ctx, event, cpu);
+	perf_install_in_context(ctx, event, event->cpu);
 	perf_unpin_context(ctx);
 	mutex_unlock(&ctx->mutex);
 
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 8e009cee..26814a1 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -294,6 +294,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
 	}
 }
 
+static void irq_sysfs_del(struct irq_desc *desc)
+{
+	/*
+	 * If irq_sysfs_init() has not yet been invoked (early boot), then
+	 * irq_kobj_base is NULL and the descriptor was never added.
+	 * kobject_del() complains about an object with no parent, so make
+	 * it conditional.
+	 */
+	if (irq_kobj_base)
+		kobject_del(&desc->kobj);
+}
+
 static int __init irq_sysfs_init(void)
 {
 	struct irq_desc *desc;
@@ -324,6 +336,7 @@ static struct kobj_type irq_kobj_type = {
 };
 
 static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+static void irq_sysfs_del(struct irq_desc *desc) {}
 
 #endif /* CONFIG_SYSFS */
 
@@ -437,7 +450,7 @@ static void free_desc(unsigned int irq)
 	 * The sysfs entry must be serialized against a concurrent
 	 * irq_sysfs_init() as well.
 	 */
-	kobject_del(&desc->kobj);
+	irq_sysfs_del(desc);
 	delete_irq_desc(irq);
 
 	/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5a29adf..952827e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2008,7 +2008,7 @@ static inline void walt_try_to_wake_up(struct task_struct *p)
 
 	rcu_read_lock();
 	grp = task_related_thread_group(p);
-	if (update_preferred_cluster(grp, p, old_load))
+	if (update_preferred_cluster(grp, p, old_load, false))
 		set_preferred_cluster(grp);
 	rcu_read_unlock();
 }
@@ -3203,7 +3203,7 @@ void scheduler_tick(void)
 
 	rcu_read_lock();
 	grp = task_related_thread_group(curr);
-	if (update_preferred_cluster(grp, curr, old_load))
+	if (update_preferred_cluster(grp, curr, old_load, true))
 		set_preferred_cluster(grp);
 	rcu_read_unlock();
 
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 31decf0..35e8185 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -56,6 +56,7 @@ struct sugov_policy {
 	struct task_struct	*thread;
 	bool			work_in_progress;
 
+	bool			limits_changed;
 	bool			need_freq_update;
 };
 
@@ -113,8 +114,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	    !cpufreq_this_cpu_can_update(sg_policy->policy))
 		return false;
 
-	if (unlikely(sg_policy->need_freq_update))
+	if (unlikely(sg_policy->limits_changed)) {
+		sg_policy->limits_changed = false;
+		sg_policy->need_freq_update = true;
 		return true;
+	}
 
 	/* No need to recalculate next freq for min_rate_limit_us
 	 * at least. However we might still decide to further rate
@@ -595,7 +599,7 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
-		sg_policy->need_freq_update = true;
+		sg_policy->limits_changed = true;
 }
 
 static inline unsigned long target_util(struct sugov_policy *sg_policy,
@@ -628,7 +632,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
-	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
+	/* Limits may have changed, don't skip frequency update */
+	busy = use_pelt() && !sg_policy->need_freq_update &&
+		sugov_cpu_is_busy(sg_cpu);
 
 	sg_cpu->util = util = sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
@@ -1286,6 +1292,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->last_freq_update_time	= 0;
 	sg_policy->next_freq			= 0;
 	sg_policy->work_in_progress		= false;
+	sg_policy->limits_changed		= false;
 	sg_policy->need_freq_update		= false;
 	sg_policy->cached_raw_freq		= 0;
 
@@ -1356,7 +1363,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	}
 
-	sg_policy->need_freq_update = true;
+	sg_policy->limits_changed = true;
 }
 
 static struct cpufreq_governor schedutil_gov = {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e682959..ad12150 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3912,7 +3912,8 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 	if (is_min_capacity_cpu(cpu)) {
 		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
 			task_boost > 0 ||
-			schedtune_task_boost(p) > 0)
+			schedtune_task_boost(p) > 0 ||
+			walt_should_kick_upmigrate(p, cpu))
 			return false;
 	} else { /* mid cap cpu */
 		if (task_boost > 1)
@@ -8745,7 +8746,17 @@ static int detach_tasks(struct lb_env *env)
 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
-		if ((load / 2) > env->imbalance)
+		/*
+		 * p is not the running task when we get here, so if p is
+		 * one of only two tasks on the src cpu rq (the other being
+		 * the running one), it is the only task that can be
+		 * balanced. Hence, only skip a task whose load exceeds
+		 * twice the imbalance when other tasks remain to balance
+		 * or when big tasks are being ignored.
+		 */
+		if (((cpu_rq(env->src_cpu)->nr_running > 2) ||
+			(env->flags & LBF_IGNORE_BIG_TASKS)) &&
+			((load / 2) > env->imbalance))
 			goto next;
 
 		detach_task(p, env);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b7af759..2b1410e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2680,7 +2680,7 @@ extern unsigned int  __read_mostly sched_load_granule;
 
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
 extern int update_preferred_cluster(struct related_thread_group *grp,
-			struct task_struct *p, u32 old_load);
+			struct task_struct *p, u32 old_load, bool from_tick);
 extern void set_preferred_cluster(struct related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);
 
@@ -2995,7 +2995,7 @@ static inline u32 task_load(struct task_struct *p) { return 0; }
 static inline u32 task_pl(struct task_struct *p) { return 0; }
 
 static inline int update_preferred_cluster(struct related_thread_group *grp,
-			 struct task_struct *p, u32 old_load)
+			 struct task_struct *p, u32 old_load, bool from_tick)
 {
 	return 0;
 }
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 2bedd91..e5f1c4e 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -475,6 +475,19 @@ static u32  top_task_load(struct rq *rq)
 	}
 }
 
+unsigned int sysctl_sched_user_hint;
+static unsigned long sched_user_hint_reset_time;
+static bool is_cluster_hosting_top_app(struct sched_cluster *cluster);
+
+static inline bool should_apply_suh_freq_boost(struct sched_cluster *cluster)
+{
+	if (sched_freq_aggr_en || !sysctl_sched_user_hint ||
+				  !cluster->aggr_grp_load)
+		return false;
+
+	return is_cluster_hosting_top_app(cluster);
+}
+
 static inline u64 freq_policy_load(struct rq *rq)
 {
 	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
@@ -510,9 +523,18 @@ static inline u64 freq_policy_load(struct rq *rq)
 		break;
 	}
 
+	if (should_apply_suh_freq_boost(cluster)) {
+		if (is_suh_max())
+			load = sched_ravg_window;
+		else
+			load = div64_u64(load * sysctl_sched_user_hint,
+					 (u64)100);
+	}
+
 done:
 	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, sched_freq_aggr_en,
-				load, reporting_policy, walt_rotation_enabled);
+				load, reporting_policy, walt_rotation_enabled,
+				sysctl_sched_user_hint);
 	return load;
 }
 
@@ -2607,6 +2629,9 @@ void update_best_cluster(struct related_thread_group *grp,
 		return;
 	}
 
+	if (is_suh_max())
+		demand = sched_group_upmigrate;
+
 	if (!grp->skip_min) {
 		if (demand >= sched_group_upmigrate) {
 			grp->skip_min = true;
@@ -2703,13 +2728,16 @@ void set_preferred_cluster(struct related_thread_group *grp)
 }
 
 int update_preferred_cluster(struct related_thread_group *grp,
-		struct task_struct *p, u32 old_load)
+		struct task_struct *p, u32 old_load, bool from_tick)
 {
 	u32 new_load = task_load(p);
 
 	if (!grp)
 		return 0;
 
+	if (unlikely(from_tick && is_suh_max()))
+		return 1;
+
 	/*
 	 * Update if task's load has changed significantly or a complete window
 	 * has passed since we last updated preference
@@ -2724,8 +2752,6 @@ int update_preferred_cluster(struct related_thread_group *grp,
 #define ADD_TASK	0
 #define REM_TASK	1
 
-#define DEFAULT_CGROUP_COLOC_ID 1
-
 static inline struct related_thread_group*
 lookup_related_thread_group(unsigned int group_id)
 {
@@ -2963,6 +2989,22 @@ int sync_cgroup_colocation(struct task_struct *p, bool insert)
 }
 #endif
 
+static bool is_cluster_hosting_top_app(struct sched_cluster *cluster)
+{
+	struct related_thread_group *grp;
+	bool grp_on_min;
+
+	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+
+	if (!grp)
+		return false;
+
+	grp_on_min = !grp->skip_min &&
+		     (sched_boost_policy() != SCHED_BOOST_ON_BIG);
+
+	return (is_min_capacity_cluster(cluster) == grp_on_min);
+}
+
 static unsigned long max_cap[NR_CPUS];
 static unsigned long thermal_cap_cpu[NR_CPUS];
 
@@ -3223,6 +3265,10 @@ void walt_irq_work(struct irq_work *irq_work)
 		rtgb_active = false;
 	}
 
+	if (!is_migration && sysctl_sched_user_hint && time_after(jiffies,
+					sched_user_hint_reset_time))
+		sysctl_sched_user_hint = 0;
+
 	for_each_sched_cluster(cluster) {
 		cpumask_t cluster_online_cpus;
 		unsigned int num_cpus, i = 1;
@@ -3435,3 +3481,26 @@ void walt_sched_init_rq(struct rq *rq)
 	rq->cum_window_demand_scaled = 0;
 	rq->notif_pending = false;
 }
+
+int walt_proc_user_hint_handler(struct ctl_table *table,
+				int write, void __user *buffer, size_t *lenp,
+				loff_t *ppos)
+{
+	int ret;
+	unsigned int old_value;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+
+	sched_user_hint_reset_time = jiffies + HZ;
+	old_value = sysctl_sched_user_hint;
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write || (old_value == sysctl_sched_user_hint))
+		goto unlock;
+
+	irq_work_queue(&walt_migration_irq_work);
+
+unlock:
+	mutex_unlock(&mutex);
+	return ret;
+}
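
The handler above arms a one-second expiry (jiffies + HZ) that walt_irq_work() enforces, and the sysctl table added to kernel/sysctl.c below clamps the value to 0..1000. A small userspace sketch of driving the knob:

	#include <stdio.h>

	static int set_sched_user_hint(unsigned int hint)
	{
		FILE *f = fopen("/proc/sys/kernel/sched_user_hint", "w");

		if (!f)
			return -1;
		fprintf(f, "%u\n", hint);
		return fclose(f) ? -1 : 0;
	}
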
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 152a9df..e89dc2f 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -306,6 +306,23 @@ static inline void walt_enable_frequency_aggregation(bool enable)
 	sched_freq_aggr_en = enable;
 }
 
+static inline bool is_suh_max(void)
+{
+	return sysctl_sched_user_hint == sched_user_hint_max;
+}
+
+#define DEFAULT_CGROUP_COLOC_ID 1
+static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
+{
+	struct related_thread_group *rtg = p->grp;
+
+	if (is_suh_max() && rtg && rtg->id == DEFAULT_CGROUP_COLOC_ID &&
+			    rtg->skip_min && p->unfilter)
+		return is_min_capacity_cpu(cpu);
+
+	return false;
+}
+
 #else /* CONFIG_SCHED_WALT */
 
 static inline void walt_sched_init_rq(struct rq *rq) { }
@@ -386,6 +403,12 @@ static inline u64 sched_irqload(int cpu)
 {
 	return 0;
 }
+
+static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
+{
+	return false;
+}
+
 #endif /* CONFIG_SCHED_WALT */
 
 #endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 85847b6..a9849cd 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -141,6 +141,9 @@ static int six_hundred_forty_kb = 640 * 1024;
 #endif
 static int two_hundred_fifty_five = 255;
 
+#ifdef CONFIG_SCHED_WALT
+const int sched_user_hint_max = 1000;
+#endif
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
 static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
 
@@ -345,6 +348,15 @@ static struct ctl_table kern_table[] = {
 #endif
 #ifdef CONFIG_SCHED_WALT
 	{
+		.procname	= "sched_user_hint",
+		.data           = &sysctl_sched_user_hint,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= walt_proc_user_hint_handler,
+		.extra1		= &zero,
+		.extra2		= (void *)&sched_user_hint_max,
+	},
+	{
 		.procname       = "sched_cpu_high_irqload",
 		.data           = &sysctl_sched_cpu_high_irqload,
 		.maxlen         = sizeof(unsigned int),
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index fd48a15..a74b1aa 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -894,8 +894,11 @@ static int __init test_firmware_init(void)
 		return -ENOMEM;
 
 	rc = __test_firmware_config_init();
-	if (rc)
+	if (rc) {
+		kfree(test_fw_config);
+		pr_err("could not init firmware test config: %d\n", rc);
 		return rc;
+	}
 
 	rc = misc_register(&test_fw_misc_device);
 	if (rc) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a21b2ca..8dbf67f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -33,6 +33,7 @@
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
 #include <linux/oom.h>
+#include <linux/page_owner.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -2478,6 +2479,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	}
 
 	ClearPageCompound(head);
+
+	split_page_owner(head, HPAGE_PMD_ORDER);
+
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
 		/* Additional pin to radix tree of swap cache */
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index aa0338c..1589165 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -126,7 +126,7 @@
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN | __GFP_NOFAIL)
+				 __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7e7cc0c..ecde75f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1037,26 +1037,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
 		css_put(&prev->css);
 }
 
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+					struct mem_cgroup *dead_memcg)
 {
-	struct mem_cgroup *memcg = dead_memcg;
 	struct mem_cgroup_reclaim_iter *iter;
 	struct mem_cgroup_per_node *mz;
 	int nid;
 	int i;
 
-	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-		for_each_node(nid) {
-			mz = mem_cgroup_nodeinfo(memcg, nid);
-			for (i = 0; i <= DEF_PRIORITY; i++) {
-				iter = &mz->iter[i];
-				cmpxchg(&iter->position,
-					dead_memcg, NULL);
-			}
+	for_each_node(nid) {
+		mz = mem_cgroup_nodeinfo(from, nid);
+		for (i = 0; i <= DEF_PRIORITY; i++) {
+			iter = &mz->iter[i];
+			cmpxchg(&iter->position,
+				dead_memcg, NULL);
 		}
 	}
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+	struct mem_cgroup *memcg = dead_memcg;
+	struct mem_cgroup *last;
+
+	do {
+		__invalidate_reclaim_iterators(memcg, dead_memcg);
+		last = memcg;
+	} while ((memcg = parent_mem_cgroup(memcg)));
+
+	/*
+	 * When cgroup1 non-hierarchy mode is used,
+	 * parent_mem_cgroup() does not walk all the way up to the
+	 * cgroup root (root_mem_cgroup). So we have to handle
+	 * dead_memcg from cgroup root separately.
+	 */
+	if (last != root_mem_cgroup)
+		__invalidate_reclaim_iterators(root_mem_cgroup,
+						dead_memcg);
+}
+
 /**
  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
  * @memcg: hierarchy root
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d78b843..4b81d09 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -406,7 +406,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 	},
 };
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
 
 struct queue_pages {
@@ -432,11 +432,14 @@ static inline bool queue_pages_required(struct page *page,
 }
 
 /*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- *        page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 2 - THP was split.
+ * -EIO - the PMD maps a migration entry, or only MPOL_MF_STRICT was
+ *        specified and an existing page was already on a node that does
+ *        not follow the policy.
  */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
@@ -454,23 +457,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	if (is_huge_zero_page(page)) {
 		spin_unlock(ptl);
 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		ret = 2;
 		goto out;
 	}
-	if (!queue_pages_required(page, qp)) {
-		ret = 1;
+	if (!queue_pages_required(page, qp))
 		goto unlock;
-	}
 
-	ret = 1;
 	flags = qp->flags;
 	/* go to thp migration */
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-		if (!vma_migratable(walk->vma)) {
-			ret = -EIO;
+		if (!vma_migratable(walk->vma) ||
+		    migrate_page_add(page, qp->pagelist, flags)) {
+			ret = 1;
 			goto unlock;
 		}
-
-		migrate_page_add(page, qp->pagelist, flags);
 	} else
 		ret = -EIO;
 unlock:
@@ -482,6 +482,13 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ *        on a node that does not follow the policy.
  */
 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			unsigned long end, struct mm_walk *walk)
@@ -491,17 +498,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
 	int ret;
+	bool has_unmovable = false;
 	pte_t *pte;
 	spinlock_t *ptl;
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-		if (ret > 0)
-			return 0;
-		else if (ret < 0)
+		if (ret != 2)
 			return ret;
 	}
+	/* THP was split, fall through to pte walk */
 
 	if (pmd_trans_unstable(pmd))
 		return 0;
@@ -522,14 +529,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!queue_pages_required(page, qp))
 			continue;
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-			if (!vma_migratable(vma))
+			/* MPOL_MF_STRICT must be specified if we get here */
+			if (!vma_migratable(vma)) {
+				has_unmovable = true;
 				break;
-			migrate_page_add(page, qp->pagelist, flags);
+			}
+
+			/*
+			 * Do not abort immediately since there may be
+			 * pages temporarily off the LRU in the range; we
+			 * still need to migrate the other LRU pages.
+			 */
+			if (migrate_page_add(page, qp->pagelist, flags))
+				has_unmovable = true;
 		} else
 			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
+
+	if (has_unmovable)
+		return 1;
+
 	return addr != end ? -EIO : 0;
 }
 
@@ -644,7 +665,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
  *
  * If pages found in a given range are on a set of nodes (determined by
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 0 - queue pages successfully or no misplaced page.
+ * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
  */
 static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -939,7 +966,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 /*
  * page migration, thp tail pages can be passed.
  */
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
 	struct page *head = compound_head(page);
@@ -952,8 +979,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 			mod_node_page_state(page_pgdat(head),
 				NR_ISOLATED_ANON + page_is_file_cache(head),
 				hpage_nr_pages(head));
+		} else if (flags & MPOL_MF_STRICT) {
+			/*
+			 * A non-movable page may reach here, and there may
+			 * be pages temporarily off the LRU or non-LRU
+			 * movable pages. Treat them as unmovable, since
+			 * they can't be isolated and thus can't be moved
+			 * at the moment; return -EIO for this case too.
+			 */
+			return -EIO;
 		}
 	}
+
+	return 0;
 }
 
 /* page allocation callback for NUMA node migration */
@@ -1156,9 +1194,10 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #else
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
+	return -EIO;
 }
 
 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1181,6 +1220,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	struct mempolicy *new;
 	unsigned long end;
 	int err;
+	int ret;
 	LIST_HEAD(pagelist);
 
 	if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1242,10 +1282,15 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	err = queue_pages_range(mm, start, end, nmask,
+	ret = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-	if (!err)
-		err = mbind_range(mm, start, end, new);
+
+	if (ret < 0) {
+		err = -EIO;
+		goto up_out;
+	}
+
+	err = mbind_range(mm, start, end, new);
 
 	if (!err) {
 		int nr_failed = 0;
@@ -1258,13 +1303,14 @@ static long do_mbind(unsigned long start, unsigned long len,
 				putback_movable_pages(&pagelist);
 		}
 
-		if (nr_failed && (flags & MPOL_MF_STRICT))
+		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
 			err = -EIO;
 	} else
 		putback_movable_pages(&pagelist);
 
+up_out:
 	up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
 	mpol_put(new);
 	return err;
 }
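
Condensing how do_mbind() now consumes the queue_pages_range() result (a restatement of the change above, not new behavior):

	/*
	 *   ret < 0  -> fail with -EIO before applying the policy
	 *   ret == 0 -> apply the policy; -EIO only if migration fails
	 *               and MPOL_MF_STRICT was given
	 *   ret > 0  -> an unmovable page was seen: the policy is still
	 *               applied, but the call reports -EIO
	 */
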
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index e75789d..4272af2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -1223,6 +1223,10 @@ void pagefault_out_of_memory(void)
 		.order = 0,
 	};
 
+	if (IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER) ||
+	    IS_ENABLED(CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER))
+		return;
+
 	if (mem_cgroup_oom_synchronize(true))
 		return;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index a77f9b2..94e2488 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1466,7 +1466,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
+			 *
+			 * The assignment to subpage above was computed from a
+			 * swap PTE which results in an invalid pointer.
+			 * Since only PAGE_SIZE pages can currently be
+			 * migrated, just set it to page. This will need to be
+			 * changed when hugepage migrations to device private
+			 * memory are supported.
 			 */
+			subpage = page;
 			goto discard;
 		}
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8721360..d515d13 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2364,6 +2364,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		return NULL;
 
 	/*
+	 * First make sure the mappings are removed from all page-tables
+	 * before they are freed.
+	 */
+	vmalloc_sync_all();
+
+	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
 	 * flag. It means that vm_struct is not fully initialized.
 	 * Now, it is fully initialized, so remove this flag here.
@@ -2908,6 +2914,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
  */
 void __weak vmalloc_sync_all(void)
 {
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6d97f19..821de0d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -53,6 +53,7 @@
 #include <linux/zpool.h>
 #include <linux/mount.h>
 #include <linux/migrate.h>
+#include <linux/wait.h>
 #include <linux/pagemap.h>
 #include <linux/fs.h>
 
@@ -267,6 +268,10 @@ struct zs_pool {
 #ifdef CONFIG_COMPACTION
 	struct inode *inode;
 	struct work_struct free_work;
+	/* A wait queue for when migration races with async_free_zspage() */
+	struct wait_queue_head migration_wait;
+	atomic_long_t isolated_pages;
+	bool destroying;
 #endif
 };
 
@@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
 	zspage->isolated--;
 }
 
+static void putback_zspage_deferred(struct zs_pool *pool,
+				    struct size_class *class,
+				    struct zspage *zspage)
+{
+	enum fullness_group fg;
+
+	fg = putback_zspage(class, zspage);
+	if (fg == ZS_EMPTY)
+		schedule_work(&pool->free_work);
+}
+
+static inline void zs_pool_dec_isolated(struct zs_pool *pool)
+{
+	VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
+	atomic_long_dec(&pool->isolated_pages);
+	/*
+	 * There's no possibility of racing, since wait_for_isolated_drain()
+	 * checks the isolated count under &class->lock after enqueuing
+	 * on migration_wait.
+	 */
+	if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
+		wake_up_all(&pool->migration_wait);
+}
+
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 				struct page *newpage, struct page *oldpage)
 {
@@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 	 */
 	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
 		get_zspage_mapping(zspage, &class_idx, &fullness);
+		atomic_long_inc(&pool->isolated_pages);
 		remove_zspage(class, zspage, fullness);
 	}
 
@@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	 * Page migration is done so let's putback isolated zspage to
 	 * the list if @page is final isolated subpage in the zspage.
 	 */
-	if (!is_zspage_isolated(zspage))
-		putback_zspage(class, zspage);
+	if (!is_zspage_isolated(zspage)) {
+		/*
+		 * We cannot race with zs_destroy_pool() here because we wait
+		 * for isolation to hit zero before we start destroying.
+		 * Also, we ensure that everyone can see pool->destroying before
+		 * we start waiting.
+		 */
+		putback_zspage_deferred(pool, class, zspage);
+		zs_pool_dec_isolated(pool);
+	}
 
 	reset_page(page);
 	put_page(page);
@@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page)
 	spin_lock(&class->lock);
 	dec_zspage_isolation(zspage);
 	if (!is_zspage_isolated(zspage)) {
-		fg = putback_zspage(class, zspage);
 		/*
 		 * Due to page_lock, we cannot free zspage immediately
 		 * so let's defer.
 		 */
-		if (fg == ZS_EMPTY)
-			schedule_work(&pool->free_work);
+		putback_zspage_deferred(pool, class, zspage);
+		zs_pool_dec_isolated(pool);
 	}
 	spin_unlock(&class->lock);
 }
@@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool)
 	return 0;
 }
 
+static bool pool_isolated_are_drained(struct zs_pool *pool)
+{
+	return atomic_long_read(&pool->isolated_pages) == 0;
+}
+
+/* Used during pool teardown to wait for in-flight page migration to drain */
+static void wait_for_isolated_drain(struct zs_pool *pool)
+{
+	/*
+	 * We're in the process of destroying the pool, so there are no
+	 * active allocations. zs_page_isolate() fails for completely free
+	 * zspages, so we need only wait for the zs_pool's isolated
+	 * count to hit zero.
+	 */
+	wait_event(pool->migration_wait,
+		   pool_isolated_are_drained(pool));
+}
+
 static void zs_unregister_migration(struct zs_pool *pool)
 {
+	pool->destroying = true;
+	/*
+	 * We need a memory barrier here to ensure global visibility of
+	 * pool->destroying. Thus pool->isolated pages will either be 0 in which
+	 * case we don't care, or it will be > 0 and pool->destroying will
+	 * ensure that we wake up once isolation hits 0.
+	 */
+	smp_mb();
+	wait_for_isolated_drain(pool); /* This can block */
 	flush_work(&pool->free_work);
 	iput(pool->inode);
 }
@@ -2366,6 +2432,8 @@ struct zs_pool *zs_create_pool(const char *name)
 	if (!pool->name)
 		goto err;
 
+	init_waitqueue_head(&pool->migration_wait);
+
 	if (create_cache(pool))
 		goto err;
 
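
The destroy/migration handshake added above follows the usual flag, barrier, waitqueue shape; condensed into a sketch (not a verbatim extract):

	/* destroyer side */
	pool->destroying = true;
	smp_mb();	/* publish destroying before sampling the count */
	wait_event(pool->migration_wait,
		   atomic_long_read(&pool->isolated_pages) == 0);

	/* completion side, once per isolated page */
	atomic_long_dec(&pool->isolated_pages);
	if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
		wake_up_all(&pool->migration_wait);
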
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0bb4d71..62ffc98 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
 	return 0;
 }
 
+static int ebt_compat_init_offsets(unsigned int number)
+{
+	if (number > INT_MAX)
+		return -EINVAL;
+
+	/* also count the base chain policies */
+	number += NF_BR_NUMHOOKS;
+
+	return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
+}
 
 static int compat_table_info(const struct ebt_table_info *info,
 			     struct compat_ebt_replace *newinfo)
 {
 	unsigned int size = info->entries_size;
 	const void *entries = info->entries;
+	int ret;
 
 	newinfo->entries_size = size;
-	if (info->nentries) {
-		int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
-						 info->nentries);
-		if (ret)
-			return ret;
-	}
+	ret = ebt_compat_init_offsets(info->nentries);
+	if (ret)
+		return ret;
 
 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
 							entries, newinfo);
@@ -2241,11 +2249,9 @@ static int compat_do_replace(struct net *net, void __user *user,
 
 	xt_compat_lock(NFPROTO_BRIDGE);
 
-	if (tmp.nentries) {
-		ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
-		if (ret < 0)
-			goto out_unlock;
-	}
+	ret = ebt_compat_init_offsets(tmp.nentries);
+	if (ret < 0)
+		goto out_unlock;
 
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
 	if (ret < 0)
@@ -2268,8 +2274,10 @@ static int compat_do_replace(struct net *net, void __user *user,
 	state.buf_kern_len = size64;
 
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-	if (WARN_ON(ret < 0))
+	if (WARN_ON(ret < 0)) {
+		vfree(entries_tmp);
 		goto out_unlock;
+	}
 
 	vfree(entries_tmp);
 	tmp.entries_size = size64;
diff --git a/net/can/gw.c b/net/can/gw.c
index 53859346..bd21614 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
 	pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
 		max_hops);
 
-	register_pernet_subsys(&cangw_pernet_ops);
+	ret = register_pernet_subsys(&cangw_pernet_ops);
+	if (ret)
+		return ret;
+
+	ret = -ENOMEM;
 	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
 				      0, 0, NULL);
-
 	if (!cgw_cache)
-		return -ENOMEM;
+		goto out_cache_create;
 
 	/* set notifier */
 	notifier.notifier_call = cgw_notifier;
-	register_netdevice_notifier(&notifier);
+	ret = register_netdevice_notifier(&notifier);
+	if (ret)
+		goto out_register_notifier;
 
 	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
 				   NULL, cgw_dump_jobs, 0);
-	if (ret) {
-		unregister_netdevice_notifier(&notifier);
-		kmem_cache_destroy(cgw_cache);
-		return -ENOBUFS;
-	}
+	if (ret)
+		goto out_rtnl_register1;
 
-	/* Only the first call to rtnl_register_module can fail */
-	rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
-			     cgw_create_job, NULL, 0);
-	rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
-			     cgw_remove_job, NULL, 0);
+	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+				   cgw_create_job, NULL, 0);
+	if (ret)
+		goto out_rtnl_register2;
+	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+				   cgw_remove_job, NULL, 0);
+	if (ret)
+		goto out_rtnl_register3;
 
 	return 0;
+
+out_rtnl_register3:
+	rtnl_unregister(PF_CAN, RTM_NEWROUTE);
+out_rtnl_register2:
+	rtnl_unregister(PF_CAN, RTM_GETROUTE);
+out_rtnl_register1:
+	unregister_netdevice_notifier(&notifier);
+out_register_notifier:
+	kmem_cache_destroy(cgw_cache);
+out_cache_create:
+	unregister_pernet_subsys(&cangw_pernet_ops);
+
+	return ret;
 }
 
 static __exit void cgw_module_exit(void)
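
The reworked init illustrates the standard reverse-order unwind ladder; a minimal self-contained sketch of the same shape (my_pernet_ops and my_notifier are hypothetical):

	static int __init my_module_init(void)
	{
		int ret;

		ret = register_pernet_subsys(&my_pernet_ops);
		if (ret)
			return ret;

		ret = register_netdevice_notifier(&my_notifier);
		if (ret)
			goto out_pernet;

		return 0;

	out_pernet:
		unregister_pernet_subsys(&my_pernet_ops);
		return ret;
	}
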
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 60934bd..76c41a8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1423,7 +1423,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 	struct ceph_osds up, acting;
 	bool force_resend = false;
 	bool unpaused = false;
-	bool legacy_change;
+	bool legacy_change = false;
 	bool split = false;
 	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
 	bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1511,15 +1511,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 		t->osd = acting.primary;
 	}
 
-	if (unpaused || legacy_change || force_resend ||
-	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
-					       RESEND_ON_SPLIT)))
+	if (unpaused || legacy_change || force_resend || split)
 		ct_res = CALC_TARGET_NEED_RESEND;
 	else
 		ct_res = CALC_TARGET_NO_ACTION;
 
 out:
-	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
+	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
+	     legacy_change, force_resend, split, ct_res, t->osd);
 	return ct_res;
 }
 
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 142b294..b0b9413 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -127,6 +127,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
 {
 	int port;
 
+	if (!ds->ops->port_mdb_add)
+		return;
+
 	for_each_set_bit(port, bitmap, ds->num_ports)
 		ds->ops->port_mdb_add(ds, port, mdb);
 }
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 12843c9..74b19a5 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -96,6 +96,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
 	flow.flowi4_tos = RT_TOS(iph->tos);
 	flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+	flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
 
 	return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
 }
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index c3c6b09..0f3407f 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -58,7 +58,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
 	if (rpfilter_addr_linklocal(&iph->saddr)) {
 		lookup_flags |= RT6_LOOKUP_F_IFACE;
 		fl6.flowi6_oif = dev->ifindex;
-	} else if ((flags & XT_RPFILTER_LOOSE) == 0)
+	/* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
+	} else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
+		  (flags & XT_RPFILTER_LOOSE) == 0)
 		fl6.flowi6_oif = dev->ifindex;
 
 	rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@@ -73,7 +75,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
 		goto out;
 	}
 
-	if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
+	if (rt->rt6i_idev->dev == dev ||
+	    l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
+	    (flags & XT_RPFILTER_LOOSE))
 		ret = true;
  out:
 	ip6_rt_put(rt);
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index bb886e7..f783d13 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
 
-	if (WARN_ONCE(params->cw_min == 0 ||
-		      params->cw_min > params->cw_max,
-		      "%s: invalid CW_min/CW_max: %d/%d\n",
-		      sdata->name, params->cw_min, params->cw_max))
+	if (params->cw_min == 0 || params->cw_min > params->cw_max) {
+		/*
+		 * If we can't configure hardware anyway, don't warn. We may
+		 * never have initialized the CW parameters.
+		 */
+		WARN_ONCE(local->ops->conf_tx,
+			  "%s: invalid CW_min/CW_max: %d/%d\n",
+			  sdata->name, params->cw_min, params->cw_max);
 		return -EINVAL;
+	}
 
 	trace_drv_conf_tx(local, sdata, ac, params);
 	if (local->ops->conf_tx)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1aaa73fa..dbd9a31 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -160,10 +160,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
 	ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
 
+	memset(chandef, 0, sizeof(struct cfg80211_chan_def));
 	chandef->chan = channel;
 	chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
 	chandef->center_freq1 = channel->center_freq;
-	chandef->center_freq2 = 0;
 
 	if (!ht_oper || !sta_ht_cap.ht_supported) {
 		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
@@ -1967,6 +1967,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
 		ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
 	}
 
+	/* WMM specification requires all 4 ACIs. */
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		if (params[ac].cw_min == 0) {
+			sdata_info(sdata,
+				   "AP has invalid WMM params (missing AC %d), using defaults\n",
+				   ac);
+			return false;
+		}
+	}
+
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		mlme_dbg(sdata,
 			 "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 534a604..9afd3fc 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -264,6 +264,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
 	/* IEEE80211_RADIOTAP_RATE rate */
 	if (info->status.rates[0].idx >= 0 &&
 	    !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+					     RATE_INFO_FLAGS_DMG |
+					     RATE_INFO_FLAGS_EDMG |
 					     IEEE80211_TX_RC_VHT_MCS)))
 		len += 2;
 
@@ -315,6 +317,8 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
 	/* IEEE80211_RADIOTAP_RATE */
 	if (info->status.rates[0].idx >= 0 &&
 	    !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+					     RATE_INFO_FLAGS_DMG |
+					     RATE_INFO_FLAGS_EDMG |
 					     IEEE80211_TX_RC_VHT_MCS))) {
 		u16 rate;
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 13ade57..4f01321 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -230,7 +230,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 
 	e.id = ip_to_id(map, ip);
 
-	if (opt->flags & IPSET_DIM_ONE_SRC)
+	if (opt->flags & IPSET_DIM_TWO_SRC)
 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
 	else
 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 1577f2f..e2538c57 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1157,7 +1157,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
 		return -ENOENT;
 
 	write_lock_bh(&ip_set_ref_lock);
-	if (set->ref != 0) {
+	if (set->ref != 0 || set->ref_netlink != 0) {
 		ret = -IPSET_ERR_REFERENCED;
 		goto out;
 	}
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index fd87de3..16ec822 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -95,15 +95,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
 	struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	 /* MAC can be src only */
-	if (!(opt->flags & IPSET_DIM_TWO_SRC))
-		return 0;
-
 	if (skb_mac_header(skb) < skb->head ||
 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	if (opt->flags & IPSET_DIM_ONE_SRC)
+	if (opt->flags & IPSET_DIM_TWO_SRC)
 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
 	else
 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 27eff89..c6073d1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -431,13 +431,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
  * table location, we assume id gets exposed to userspace.
  *
  * Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
  *
  * 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
  */
 u32 nf_ct_get_id(const struct nf_conn *ct)
 {
@@ -447,9 +446,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
 	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
 
 	a = (unsigned long)ct;
-	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
-	c = (unsigned long)ct->ext;
-	d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+	b = (unsigned long)ct->master;
+	c = (unsigned long)nf_ct_net(ct);
+	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
 				   &ct_id_seed);
 #ifdef CONFIG_64BIT
 	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 842f3f8..7011ab2 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -480,6 +480,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
 	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
 	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
 	__u32 seq, ack, sack, end, win, swin;
+	u16 win_raw;
 	s32 receiver_offset;
 	bool res, in_recv_win;
 
@@ -488,7 +489,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
 	 */
 	seq = ntohl(tcph->seq);
 	ack = sack = ntohl(tcph->ack_seq);
-	win = ntohs(tcph->window);
+	win_raw = ntohs(tcph->window);
+	win = win_raw;
 	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
 
 	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
@@ -663,14 +665,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
 			    && state->last_seq == seq
 			    && state->last_ack == ack
 			    && state->last_end == end
-			    && state->last_win == win)
+			    && state->last_win == win_raw)
 				state->retrans++;
 			else {
 				state->last_dir = dir;
 				state->last_seq = seq;
 				state->last_ack = ack;
 				state->last_end = end;
-				state->last_win = win;
+				state->last_win = win_raw;
 				state->retrans = 0;
 			}
 		}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 9169134..7f2c191 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -575,7 +575,7 @@ static int nfnetlink_bind(struct net *net, int group)
 	ss = nfnetlink_get_subsys(type << 8);
 	rcu_read_unlock();
 	if (!ss)
-		request_module("nfnetlink-subsys-%d", type);
+		request_module_nowait("nfnetlink-subsys-%d", type);
 	return 0;
 }
 #endif
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c2d2371..b8f23f7 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -196,7 +196,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
 	priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
 
 	priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
-	if (priv->modulus <= 1)
+	if (priv->modulus < 1)
 		return -ERANGE;
 
 	if (priv->offset + priv->modulus - 1 < priv->offset)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 33e982b..7e25a6a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2616,6 +2616,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
 	mutex_lock(&po->pg_vec_lock);
 
+	/* packet_sendmsg()'s check on tx_ring.pg_vec was lockless, so we
+	 * need to confirm it under the protection of pg_vec_lock.
+	 */
+	if (unlikely(!po->tx_ring.pg_vec)) {
+		err = -EBUSY;
+		goto out;
+	}
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
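
This is the classic lockless-check-then-recheck pattern: packet_sendmsg() inspected tx_ring.pg_vec without holding pg_vec_lock, so tpacket_snd() must confirm the ring still exists once the lock is held, or a concurrent ring teardown slips through. A simplified userspace sketch with a pthread mutex; the struct and function names are stand-ins, not the kernel's:

```c
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct toy_ring { void **pg_vec; };
static pthread_mutex_t pg_vec_lock = PTHREAD_MUTEX_INITIALIZER;

static int toy_snd(struct toy_ring *ring)
{
	int err = 0;

	pthread_mutex_lock(&pg_vec_lock);
	/* re-check under the lock: the ring may have been torn down
	 * between the caller's lockless check and acquiring the mutex */
	if (ring->pg_vec == NULL) {
		err = -16; /* -EBUSY */
		goto out;
	}
	/* ... transmit frames via ring->pg_vec ... */
out:
	pthread_mutex_unlock(&pg_vec_lock);
	return err;
}

int main(void)
{
	struct toy_ring ring = { .pg_vec = NULL };

	printf("%d\n", toy_snd(&ring)); /* -16: no ring mapped */
	return 0;
}
```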
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 3a3378c..2c9d7de 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -20,6 +20,7 @@
 #include <linux/rwsem.h>
 #include <linux/ipc_logging.h>
 #include <linux/uidgid.h>
+#include <linux/pm_wakeup.h>
 
 #include <net/sock.h>
 #include <uapi/linux/sched/types.h>
@@ -150,6 +151,7 @@ static DEFINE_MUTEX(qrtr_port_lock);
  * @kworker: worker thread for recv work
  * @task: task to run the worker thread
  * @read_data: scheduled work for recv work
+ * @ws: wakeup source to avoid system suspend
  * @ilc: ipc logging context reference
  */
 struct qrtr_node {
@@ -171,6 +173,8 @@ struct qrtr_node {
 	struct task_struct *task;
 	struct kthread_work read_data;
 
+	struct wakeup_source *ws;
+
 	void *ilc;
 };
 
@@ -347,6 +351,7 @@ static void __qrtr_node_release(struct kref *kref)
 	}
 	mutex_unlock(&node->qrtr_tx_lock);
 
+	wakeup_source_unregister(node->ws);
 	kthread_flush_worker(&node->kworker);
 	kthread_stop(node->task);
 
@@ -611,10 +616,16 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
 		node->nid = nid;
 	up_write(&qrtr_node_lock);
 
+	snprintf(name, sizeof(name), "qrtr_%d", nid);
 	if (!node->ilc) {
-		snprintf(name, sizeof(name), "qrtr_%d", nid);
 		node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0);
 	}
+	/* Create a wakeup source only for NID 0, 3 or 7.  Sensor service
+	 * sample streams from other nodes cause APPS suspend problems
+	 * and power drain issues.
+	 */
+	if (!node->ws && (nid == 0 || nid == 3 || nid == 7))
+		node->ws = wakeup_source_register(name);
 }
 
 /**
@@ -745,6 +756,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	    cb->type != QRTR_TYPE_RESUME_TX)
 		goto err;
 
+	pm_wakeup_ws_event(node->ws, 0, true);
+
 	if (frag) {
 		skb->data_len = size;
 		skb->len = size;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d76e5e5..7319d3c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -195,7 +195,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
 service_in_use:
 	write_unlock(&local->services_lock);
-	rxrpc_put_local(local);
+	rxrpc_unuse_local(local);
 	ret = -EADDRINUSE;
 error_unlock:
 	release_sock(&rx->sk);
@@ -908,7 +908,7 @@ static int rxrpc_release_sock(struct sock *sk)
 	rxrpc_queue_work(&rxnet->service_conn_reaper);
 	rxrpc_queue_work(&rxnet->client_conn_reaper);
 
-	rxrpc_put_local(rx->local);
+	rxrpc_unuse_local(rx->local);
 	rx->local = NULL;
 	key_put(rx->key);
 	rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7444d8b..8d72e94 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -258,7 +258,8 @@ struct rxrpc_security {
  */
 struct rxrpc_local {
 	struct rcu_head		rcu;
-	atomic_t		usage;
+	atomic_t		active_users;	/* Number of users of the local endpoint */
+	atomic_t		usage;		/* Number of references to the structure */
 	struct rxrpc_net	*rxnet;		/* The network ns in which this resides */
 	struct list_head	link;
 	struct socket		*socket;	/* my UDP socket */
@@ -998,6 +999,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
 void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
@@ -1057,6 +1060,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);
 
 /*
  * proc.c
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index d591f54..7965600 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1106,8 +1106,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
 {
 	_enter("%p,%p", local, skb);
 
-	skb_queue_tail(&local->event_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->event_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	}
 }
 
 /*
@@ -1117,8 +1121,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 {
 	CHECK_SLAB_OKAY(&local->usage);
 
-	skb_queue_tail(&local->reject_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->reject_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	}
 }
 
 /*
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 10317db..c752ad4 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
 	if (local) {
 		atomic_set(&local->usage, 1);
+		atomic_set(&local->active_users, 1);
 		local->rxnet = rxnet;
 		INIT_LIST_HEAD(&local->link);
 		INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -96,7 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
 		memcpy(&local->srx, srx, sizeof(*srx));
 		local->srx.srx_service = 0;
-		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
+		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
 	}
 
 	_leave(" = %p", local);
@@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 		 * bind the transport socket may still fail if we're attempting
 		 * to use a local address that the dying object is still using.
 		 */
-		if (!rxrpc_get_local_maybe(local)) {
-			cursor = cursor->next;
-			list_del_init(&local->link);
+		if (!rxrpc_use_local(local))
 			break;
-		}
 
 		age = "old";
 		goto found;
@@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 	if (ret < 0)
 		goto sock_error;
 
-	list_add_tail(&local->link, cursor);
+	if (cursor != &rxnet->local_endpoints)
+		list_replace_init(cursor, &local->link);
+	else
+		list_add_tail(&local->link, cursor);
 	age = "new";
 
 found:
@@ -324,7 +325,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
 	int n;
 
 	n = atomic_inc_return(&local->usage);
-	trace_rxrpc_local(local, rxrpc_local_got, n, here);
+	trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
 	return local;
 }
 
@@ -338,7 +339,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 	if (local) {
 		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
 		if (n > 0)
-			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
+					  n + 1, here);
 		else
 			local = NULL;
 	}
@@ -346,24 +348,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 }
 
 /*
- * Queue a local endpoint.
+ * Queue a local endpoint and pass the caller's reference to the work item.
  */
 void rxrpc_queue_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
+	unsigned int debug_id = local->debug_id;
+	int n = atomic_read(&local->usage);
 
 	if (rxrpc_queue_work(&local->processor))
-		trace_rxrpc_local(local, rxrpc_local_queued,
-				  atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
-	_enter("%d", local->debug_id);
-	rxrpc_queue_work(&local->processor);
+		trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+	else
+		rxrpc_put_local(local);
 }
 
 /*
@@ -376,10 +372,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
 
 	if (local) {
 		n = atomic_dec_return(&local->usage);
-		trace_rxrpc_local(local, rxrpc_local_put, n, here);
+		trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
 
 		if (n == 0)
-			__rxrpc_put_local(local);
+			call_rcu(&local->rcu, rxrpc_local_rcu);
+	}
+}
+
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	local = rxrpc_get_local_maybe(local);
+	if (!local)
+		return NULL;
+
+	au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+	if (au == 0) {
+		rxrpc_put_local(local);
+		return NULL;
+	}
+
+	return local;
+}
+
+/*
+ * Cease using a local endpoint.  Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	if (local) {
+		au = atomic_dec_return(&local->active_users);
+		if (au == 0)
+			rxrpc_queue_local(local);
+		else
+			rxrpc_put_local(local);
 	}
 }
 
@@ -397,16 +430,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 
 	_enter("%d", local->debug_id);
 
-	/* We can get a race between an incoming call packet queueing the
-	 * processor again and the work processor starting the destruction
-	 * process which will shut down the UDP socket.
-	 */
-	if (local->dead) {
-		_leave(" [already dead]");
-		return;
-	}
-	local->dead = true;
-
 	mutex_lock(&rxnet->local_mutex);
 	list_del_init(&local->link);
 	mutex_unlock(&rxnet->local_mutex);
@@ -426,13 +449,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 	 */
 	rxrpc_purge_queue(&local->reject_queue);
 	rxrpc_purge_queue(&local->event_queue);
-
-	_debug("rcu local %d", local->debug_id);
-	call_rcu(&local->rcu, rxrpc_local_rcu);
 }
 
 /*
- * Process events on an endpoint
+ * Process events on an endpoint.  The work item carries a ref which
+ * we must release.
  */
 static void rxrpc_local_processor(struct work_struct *work)
 {
@@ -440,13 +461,15 @@ static void rxrpc_local_processor(struct work_struct *work)
 		container_of(work, struct rxrpc_local, processor);
 	bool again;
 
-	trace_rxrpc_local(local, rxrpc_local_processing,
+	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
 			  atomic_read(&local->usage), NULL);
 
 	do {
 		again = false;
-		if (atomic_read(&local->usage) == 0)
-			return rxrpc_local_destroyer(local);
+		if (atomic_read(&local->active_users) == 0) {
+			rxrpc_local_destroyer(local);
+			break;
+		}
 
 		if (!skb_queue_empty(&local->reject_queue)) {
 			rxrpc_reject_packets(local);
@@ -458,6 +481,8 @@ static void rxrpc_local_processor(struct work_struct *work)
 			again = true;
 		}
 	} while (again);
+
+	rxrpc_put_local(local);
 }
 
 /*
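
The rxrpc changes above split the endpoint lifecycle into two counters: usage keeps the memory alive, while active_users tracks callers actually using the endpoint, and the last unuse (rather than the last put) starts teardown. A compressed single-file sketch of the pattern using C11 atomics; it mirrors the get/put and use/unuse pairing but none of the kernel specifics such as the work item:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct endpoint {
	atomic_int usage;        /* references to the structure */
	atomic_int active_users; /* users of the endpoint */
};

static void put(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->usage, 1) == 1) {
		printf("freeing endpoint\n");
		free(ep);
	}
}

static struct endpoint *use(struct endpoint *ep)
{
	int au = atomic_load(&ep->active_users);

	/* like atomic_fetch_add_unless(): never resurrect an endpoint
	 * whose active count already hit 0 */
	do {
		if (au == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&ep->active_users, &au, au + 1));
	atomic_fetch_add(&ep->usage, 1);
	return ep;
}

static void unuse(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->active_users, 1) == 1)
		printf("last user gone: start teardown\n");
	put(ep);
}

int main(void)
{
	struct endpoint *ep = malloc(sizeof(*ep));

	atomic_init(&ep->usage, 1);
	atomic_init(&ep->active_users, 1);
	use(ep);
	unuse(ep);
	unuse(ep); /* teardown message, then the final put frees */
	return 0;
}
```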
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index bd2fa3b..dc7fdaf 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -375,7 +375,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 		spin_lock_bh(&rxnet->peer_hash_lock);
 		list_add_tail(&peer->keepalive_link,
 			      &rxnet->peer_keepalive[slot & mask]);
-		rxrpc_put_peer(peer);
+		rxrpc_put_peer_locked(peer);
 	}
 
 	spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 5691b7d..71547e8 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -441,6 +441,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 }
 
 /*
+ * Drop a ref on a peer record where the caller already holds the
+ * peer_hash_lock.
+ */
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	n = atomic_dec_return(&peer->usage);
+	trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+	if (n == 0) {
+		hash_del_rcu(&peer->hash_link);
+		list_del_init(&peer->keepalive_link);
+		kfree_rcu(peer, rcu);
+	}
+}
+
+/*
  * Make sure all peer records have been discarded.
  */
 void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index be01f9c..5d6ab4f 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -230,6 +230,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 			rxrpc_set_call_completion(call,
 						  RXRPC_CALL_LOCAL_ERROR,
 						  0, ret);
+			rxrpc_notify_socket(call);
 			goto out;
 		}
 		_debug("need instant resend %d", ret);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 3131b41..28adac3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -561,7 +561,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
 	 */
 	if (net->sctp.pf_enable &&
 	   (transport->state == SCTP_ACTIVE) &&
-	   (asoc->pf_retrans < transport->pathmaxrxt) &&
+	   (transport->error_count < transport->pathmaxrxt) &&
 	   (transport->error_count > asoc->pf_retrans)) {
 
 		sctp_assoc_control_transport(asoc, transport,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 0da5793..87061a4b 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -416,6 +416,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
 		nstr_list[i] = htons(str_list[i]);
 
 	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+		kfree(nstr_list);
 		retval = -EAGAIN;
 		goto out;
 	}
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d..0f1eaed 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
 		tipc_set_node_id(net, node_id);
 	}
 	tn->trial_addr = addr;
+	tn->addr_trial_end = jiffies;
 	pr_info("32-bit node address hash set to %x\n", addr);
 }
 
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2db713d..5d5333a 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -13,6 +13,11 @@
 #include "core.h"
 #include "rdev-ops.h"
 
+static bool cfg80211_valid_60g_freq(u32 freq)
+{
+	return freq >= 58320 && freq <= 70200;
+}
+
 void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
 			     struct ieee80211_channel *chan,
 			     enum nl80211_channel_type chan_type)
@@ -22,6 +27,8 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
 
 	chandef->chan = chan;
 	chandef->center_freq2 = 0;
+	chandef->edmg.bw_config = 0;
+	chandef->edmg.channels = 0;
 
 	switch (chan_type) {
 	case NL80211_CHAN_NO_HT:
@@ -46,6 +53,91 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
 }
 EXPORT_SYMBOL(cfg80211_chandef_create);
 
+static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
+{
+	int max_contiguous = 0;
+	int num_of_enabled = 0;
+	int contiguous = 0;
+	int i;
+
+	if (!chandef->edmg.channels || !chandef->edmg.bw_config)
+		return false;
+
+	if (!cfg80211_valid_60g_freq(chandef->chan->center_freq))
+		return false;
+
+	for (i = 0; i < 6; i++) {
+		if (chandef->edmg.channels & BIT(i)) {
+			contiguous++;
+			num_of_enabled++;
+		} else {
+			contiguous = 0;
+		}
+
+		max_contiguous = max(contiguous, max_contiguous);
+	}
+	/* basic verification of edmg configuration according to
+	 * IEEE P802.11ay/D4.0 section 9.4.2.251
+	 */
+	/* check bw_config against contiguous edmg channels */
+	switch (chandef->edmg.bw_config) {
+	case IEEE80211_EDMG_BW_CONFIG_4:
+	case IEEE80211_EDMG_BW_CONFIG_8:
+	case IEEE80211_EDMG_BW_CONFIG_12:
+		if (max_contiguous < 1)
+			return false;
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_5:
+	case IEEE80211_EDMG_BW_CONFIG_9:
+	case IEEE80211_EDMG_BW_CONFIG_13:
+		if (max_contiguous < 2)
+			return false;
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_6:
+	case IEEE80211_EDMG_BW_CONFIG_10:
+	case IEEE80211_EDMG_BW_CONFIG_14:
+		if (max_contiguous < 3)
+			return false;
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_7:
+	case IEEE80211_EDMG_BW_CONFIG_11:
+	case IEEE80211_EDMG_BW_CONFIG_15:
+		if (max_contiguous < 4)
+			return false;
+		break;
+
+	default:
+		return false;
+	}
+
+	/* check bw_config against aggregated (non-contiguous) edmg channels */
+	switch (chandef->edmg.bw_config) {
+	case IEEE80211_EDMG_BW_CONFIG_4:
+	case IEEE80211_EDMG_BW_CONFIG_5:
+	case IEEE80211_EDMG_BW_CONFIG_6:
+	case IEEE80211_EDMG_BW_CONFIG_7:
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_8:
+	case IEEE80211_EDMG_BW_CONFIG_9:
+	case IEEE80211_EDMG_BW_CONFIG_10:
+	case IEEE80211_EDMG_BW_CONFIG_11:
+		if (num_of_enabled < 2)
+			return false;
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_12:
+	case IEEE80211_EDMG_BW_CONFIG_13:
+	case IEEE80211_EDMG_BW_CONFIG_14:
+	case IEEE80211_EDMG_BW_CONFIG_15:
+		if (num_of_enabled < 4 || max_contiguous < 2)
+			return false;
+		break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
 bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
 {
 	u32 control_freq;
@@ -111,6 +203,10 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
 		return false;
 	}
 
+	if (cfg80211_chandef_is_edmg(chandef) &&
+	    !cfg80211_edmg_chandef_valid(chandef))
+		return false;
+
 	return true;
 }
 EXPORT_SYMBOL(cfg80211_chandef_valid);
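
The contiguity checks above reduce to finding the longest run of set bits in the 6-bit EDMG channel bitmap and comparing it against what the requested bw_config demands. A small sketch of that scan:

```c
#include <stdio.h>

/* Longest run of set bits over the 6-bit EDMG channel bitmap, as in
 * cfg80211_edmg_chandef_valid(): a bw_config needing N contiguous
 * channels is only valid if the longest run is >= N. */
static int max_contiguous_channels(unsigned char channels)
{
	int i, run = 0, best = 0;

	for (i = 0; i < 6; i++) {
		if (channels & (1u << i))
			run++;
		else
			run = 0;
		if (run > best)
			best = run;
	}
	return best;
}

int main(void)
{
	/* channels 1, 2 and 4 set: 0b001011 -> longest run is 2 */
	printf("%d\n", max_contiguous_channels(0x0B));
	return 0;
}
```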
@@ -720,12 +816,66 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
 	return true;
 }
 
+/* check if the operating channels are valid and supported */
+static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
+				 enum ieee80211_edmg_bw_config edmg_bw_config,
+				 int primary_channel,
+				 struct ieee80211_edmg *edmg_cap)
+{
+	struct ieee80211_channel *chan;
+	int i, freq;
+	int channels_counter = 0;
+
+	if (!edmg_channels && !edmg_bw_config)
+		return true;
+
+	if ((!edmg_channels && edmg_bw_config) ||
+	    (edmg_channels && !edmg_bw_config))
+		return false;
+
+	if (!(edmg_channels & BIT(primary_channel - 1)))
+		return false;
+
+	/* 60GHz channels 1..6 */
+	for (i = 0; i < 6; i++) {
+		if (!(edmg_channels & BIT(i)))
+			continue;
+
+		if (!(edmg_cap->channels & BIT(i)))
+			return false;
+
+		channels_counter++;
+
+		freq = ieee80211_channel_to_frequency(i + 1,
+						      NL80211_BAND_60GHZ);
+		chan = ieee80211_get_channel(wiphy, freq);
+		if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+			return false;
+	}
+
+	/* IEEE 802.11 allows at most 4 channels */
+	if (channels_counter > 4)
+		return false;
+
+	/* check bw_config is a subset of what the driver supports
+	 * (see IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13)
+	 */
+	if ((edmg_bw_config % 4) > (edmg_cap->bw_config % 4))
+		return false;
+
+	if (edmg_bw_config > edmg_cap->bw_config)
+		return false;
+
+	return true;
+}
+
 bool cfg80211_chandef_usable(struct wiphy *wiphy,
 			     const struct cfg80211_chan_def *chandef,
 			     u32 prohibited_flags)
 {
 	struct ieee80211_sta_ht_cap *ht_cap;
 	struct ieee80211_sta_vht_cap *vht_cap;
+	struct ieee80211_edmg *edmg_cap;
 	u32 width, control_freq, cap;
 
 	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
@@ -733,6 +883,15 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 
 	ht_cap = &wiphy->bands[chandef->chan->band]->ht_cap;
 	vht_cap = &wiphy->bands[chandef->chan->band]->vht_cap;
+	edmg_cap = &wiphy->bands[chandef->chan->band]->edmg_cap;
+
+	if (edmg_cap->channels &&
+	    !cfg80211_edmg_usable(wiphy,
+				  chandef->edmg.channels,
+				  chandef->edmg.bw_config,
+				  chandef->chan->hw_value,
+				  edmg_cap))
+		return false;
 
 	control_freq = chandef->chan->center_freq;
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 646d50c..0323ed8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -209,6 +209,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 
 	[NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 },
 	[NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 },
+	[NL80211_ATTR_WIPHY_EDMG_CHANNELS] = { .type = NLA_U8 },
+	[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG] = { .type = NLA_U8 },
 	[NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 },
 	[NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 },
 	[NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 },
@@ -1410,6 +1412,15 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg,
 		nla_nest_end(msg, nl_iftype_data);
 	}
 
+	/* add EDMG info */
+	if (sband->edmg_cap.channels &&
+	    (nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_CHANNELS,
+		       sband->edmg_cap.channels) ||
+	    nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_BW_CONFIG,
+		       sband->edmg_cap.bw_config)))
+
+		return -ENOBUFS;
+
 	/* add bitrates */
 	nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
 	if (!nl_rates)
@@ -2348,6 +2359,18 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 					info->attrs[NL80211_ATTR_CENTER_FREQ2]);
 	}
 
+	if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) {
+		chandef->edmg.channels =
+		      nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]);
+
+		if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG])
+			chandef->edmg.bw_config =
+		     nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]);
+	} else {
+		chandef->edmg.bw_config = 0;
+		chandef->edmg.channels = 0;
+	}
+
 	if (!cfg80211_chandef_valid(chandef))
 		return -EINVAL;
 
@@ -9407,6 +9430,15 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 			return -EINVAL;
 	}
 
+	if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) {
+		connect.edmg.channels =
+		      nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]);
+
+		if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG])
+			connect.edmg.bw_config =
+		     nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]);
+	}
+
 	if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
 		connkeys = nl80211_parse_connkeys(rdev, info, NULL);
 		if (IS_ERR(connkeys))
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 83a1781..96f5c8e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1009,7 +1009,7 @@ static u32 cfg80211_calculate_bitrate_ht(struct rate_info *rate)
 	return (bitrate + 50000) / 100000;
 }
 
-static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
+static u32 cfg80211_calculate_bitrate_dmg(struct rate_info *rate)
 {
 	static const u32 __mcs2bitrate[] = {
 		/* control PHY */
@@ -1056,6 +1056,40 @@ static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
 	return __mcs2bitrate[rate->mcs];
 }
 
+static u32 cfg80211_calculate_bitrate_edmg(struct rate_info *rate)
+{
+	static const u32 __mcs2bitrate[] = {
+		/* control PHY */
+		[0] =   275,
+		/* SC PHY */
+		[1] =  3850,
+		[2] =  7700,
+		[3] =  9625,
+		[4] = 11550,
+		[5] = 12512, /* 1251.25 mbps */
+		[6] = 13475,
+		[7] = 15400,
+		[8] = 19250,
+		[9] = 23100,
+		[10] = 25025,
+		[11] = 26950,
+		[12] = 30800,
+		[13] = 38500,
+		[14] = 46200,
+		[15] = 50050,
+		[16] = 53900,
+		[17] = 57750,
+		[18] = 69300,
+		[19] = 75075,
+		[20] = 80850,
+	};
+
+	if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate)))
+		return 0;
+
+	return __mcs2bitrate[rate->mcs] * rate->n_bonded_ch;
+}
+
 static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
 {
 	static const u32 base[4][10] = {
@@ -1226,8 +1260,10 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate)
 {
 	if (rate->flags & RATE_INFO_FLAGS_MCS)
 		return cfg80211_calculate_bitrate_ht(rate);
-	if (rate->flags & RATE_INFO_FLAGS_60G)
-		return cfg80211_calculate_bitrate_60g(rate);
+	if (rate->flags & RATE_INFO_FLAGS_DMG)
+		return cfg80211_calculate_bitrate_dmg(rate);
+	if (rate->flags & RATE_INFO_FLAGS_EDMG)
+		return cfg80211_calculate_bitrate_edmg(rate);
 	if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
 		return cfg80211_calculate_bitrate_vht(rate);
 	if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
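
As with the DMG table, the EDMG entries are single-channel rates in units of 100 kbps, and cfg80211_calculate_bitrate_edmg() multiplies by the number of bonded channels. A tiny worked example reusing a few of the table values above:

```c
#include <stdint.h>
#include <stdio.h>

/* A few entries repeated from the EDMG table; units of 100 kbps. */
static const uint32_t mcs2bitrate[] = {
	[0] = 275, [1] = 3850, [5] = 12512, [12] = 30800,
};

int main(void)
{
	unsigned int mcs = 5, n_bonded_ch = 2;
	uint32_t rate = mcs2bitrate[mcs] * n_bonded_ch;

	/* 12512 * 2 = 25024 -> 2502.4 Mbps over two bonded channels */
	printf("%u.%u Mbps\n", rate / 10, rate % 10);
	return 0;
}
```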
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index dad5583..3b2861f 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -20,7 +20,7 @@
 
 # $(cc-option,<flag>)
 # Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
+cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
 
 # $(ld-option,<flag>)
 # Return y if the linker supports <flag>, n otherwise
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 1771a31..15dd58a 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -75,7 +75,7 @@
  $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,)       \
  $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile)   \
  $(if $(KBUILD_EXTMOD),-I $(modulesymfile))      \
- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
  $(if $(KBUILD_EXTMOD),-o $(modulesymfile))      \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E)  \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6ac3685..a5fe929 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3118,7 +3118,7 @@
 				$compat2 =~ s/\,[a-zA-Z0-9]*\-/\,<\.\*>\-/;
 				my $compat3 = $compat;
 				$compat3 =~ s/\,([a-z]*)[0-9]*\-/\,$1<\.\*>\-/;
-				`grep -Erq "$compat|$compat2|$compat3" $dt_path`;
+				`grep -ERq "$compat|$compat2|$compat3" $dt_path`;
 				if ( $? >> 8 ) {
 					WARN("UNDOCUMENTED_DT_STRING",
 					     "DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr);
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index 0674597..3524dbc 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -301,7 +301,7 @@
 	#
 	# Checks valid for RHEL/CentOS version 7.x.
 	#
-	if (! $system_release =~ /Fedora/) {
+	if (!($system_release =~ /Fedora/)) {
 		$map{"virtualenv"} = "python-virtualenv";
 	}
 
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 009e469..8e547e2 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -578,10 +578,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
 		stream->metadata_set = false;
 		stream->next_track = false;
 
-		if (stream->direction == SND_COMPRESS_PLAYBACK)
-			stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-		else
-			stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 	} else {
 		return -EPERM;
 	}
@@ -698,8 +695,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
 {
 	int retval;
 
-	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_SETUP:
+		if (stream->direction != SND_COMPRESS_CAPTURE)
+			return -EPERM;
+		break;
+	case SNDRV_PCM_STATE_PREPARED:
+		break;
+	default:
 		return -EPERM;
+	}
+
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
 	if (!retval)
 		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
@@ -710,9 +716,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
 {
 	int retval;
 
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_PREPARED:
 		return -EPERM;
+	default:
+		break;
+	}
+
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 	if (!retval) {
 		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
@@ -771,10 +783,18 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
 	int retval;
 
 	mutex_lock(&stream->device->lock);
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
-		retval = -EPERM;
-		goto ret;
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_PREPARED:
+	case SNDRV_PCM_STATE_PAUSED:
+		mutex_unlock(&stream->device->lock);
+		return -EPERM;
+	case SNDRV_PCM_STATE_XRUN:
+		mutex_unlock(&stream->device->lock);
+		return -EPIPE;
+	default:
+		break;
 	}
 	mutex_unlock(&stream->device->lock);
 	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
@@ -798,6 +818,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
 	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
 		return -EPERM;
 
+	/* next track doesn't have any meaning for capture streams */
+	if (stream->direction == SND_COMPRESS_CAPTURE)
+		return -EPERM;
+
 	/* you can signal next track if this is intended to be a gapless stream
 	 * and current track metadata is set
 	 */
@@ -817,12 +841,25 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 	int retval;
 
 	mutex_lock(&stream->device->lock);
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_PREPARED:
+	case SNDRV_PCM_STATE_PAUSED:
 		mutex_unlock(&stream->device->lock);
 		return -EPERM;
+	case SNDRV_PCM_STATE_XRUN:
+		mutex_unlock(&stream->device->lock);
+		return -EPIPE;
+	default:
+		break;
 	}
 	mutex_unlock(&stream->device->lock);
+
+	/* partial drain doesn't have any meaning for capture streams */
+	if (stream->direction == SND_COMPRESS_CAPTURE)
+		return -EPERM;
+
 	/* stream can be drained only when next track has been signalled */
 	if (stream->next_track == false)
 		return -EPERM;
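
The snd_compr_* changes replace open-coded state comparisons with switch statements that enumerate exactly which states each trigger accepts, including the capture-only start from SETUP (capture streams never reach PREPARED). A reduced sketch of that check with simplified enums, not the ALSA types:

```c
#include <stdio.h>

enum state { ST_OPEN, ST_SETUP, ST_PREPARED, ST_RUNNING, ST_PAUSED, ST_XRUN };
enum dir { PLAYBACK, CAPTURE };

/* Returns 0 if a START trigger is allowed in this state, else -EPERM-like. */
static int check_start(enum state s, enum dir d)
{
	switch (s) {
	case ST_SETUP:
		/* capture never reaches PREPARED, so allow SETUP there */
		return d == CAPTURE ? 0 : -1;
	case ST_PREPARED:
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	printf("capture/SETUP:  %d\n", check_start(ST_SETUP, CAPTURE));
	printf("playback/SETUP: %d\n", check_start(ST_SETUP, PLAYBACK));
	printf("playback/PREP:  %d\n", check_start(ST_PREPARED, PLAYBACK));
	return 0;
}
```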
diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
index 1ebf00c..715cd99 100644
--- a/sound/firewire/packets-buffer.c
+++ b/sound/firewire/packets-buffer.c
@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
 	packets_per_page = PAGE_SIZE / packet_size;
 	if (WARN_ON(!packets_per_page)) {
 		err = -EINVAL;
-		goto error;
+		goto err_packets;
 	}
 	pages = DIV_ROUND_UP(count, packets_per_page);
 
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index a12e594..a41c1be 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -609,11 +609,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
 	}
 	runtime->private_data = azx_dev;
 
-	if (chip->gts_present)
-		azx_pcm_hw.info = azx_pcm_hw.info |
-			SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
-
 	runtime->hw = azx_pcm_hw;
+	if (chip->gts_present)
+		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
 	runtime->hw.channels_min = hinfo->channels_min;
 	runtime->hw.channels_max = hinfo->channels_max;
 	runtime->hw.formats = hinfo->formats;
@@ -626,6 +624,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
 				     20,
 				     178000000);
 
+	/* For some reason the playback stream stalls on PulseAudio with
+	 * tsched=1 when a capture stream triggers.  Until we figure out the
+	 * real cause, disable tsched mode via the SNDRV_PCM_INFO_BATCH flag.
+	 */
+	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
+		runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
+
 	if (chip->align_buffer_size)
 		/* constrain buffer sizes to be multiple of 128
 		   bytes. This is more efficient in terms of memory
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 53c3cd2..8a9dd47 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -40,7 +40,7 @@
 /* 14 unused */
 #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)	/* X-Fi workaround */
 #define AZX_DCAPS_POSFIX_LPIB	(1 << 16)	/* Use LPIB as default */
-/* 17 unused */
+#define AZX_DCAPS_AMD_WORKAROUND (1 << 17)	/* AMD-specific workaround */
 #define AZX_DCAPS_NO_64BIT	(1 << 18)	/* No 64bit address */
 #define AZX_DCAPS_SYNC_WRITE	(1 << 19)	/* sync each cmd write */
 #define AZX_DCAPS_OLD_SSYNC	(1 << 20)	/* Old SSYNC reg for ICH */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 579984e..bb2bd33 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6033,6 +6033,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
 }
 EXPORT_SYMBOL_GPL(snd_hda_gen_free);
 
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+	/* Make the codec enter D3 to avoid spurious noises from the internal
+	 * speaker during (and after) reboot
+	 */
+	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+	snd_hda_codec_write(codec, codec->core.afg, 0,
+			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+	msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
 #ifdef CONFIG_PM
 /**
  * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6060,6 +6078,7 @@ static const struct hda_codec_ops generic_patch_ops = {
 	.init = snd_hda_gen_init,
 	.free = snd_hda_gen_free,
 	.unsol_event = snd_hda_jack_unsol_event,
+	.reboot_notify = snd_hda_gen_reboot_notify,
 #ifdef CONFIG_PM
 	.check_power_status = snd_hda_gen_check_power_status,
 #endif
@@ -6082,7 +6101,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
 
 	err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
 	if (err < 0)
-		return err;
+		goto error;
 
 	err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
 	if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 1012366..ce9c293 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -336,6 +336,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
 				  struct auto_pin_cfg *cfg);
 int snd_hda_gen_build_controls(struct hda_codec *codec);
 int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
 
 /* standard jack event callbacks */
 void snd_hda_gen_hp_automute(struct hda_codec *codec,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 308ce76..7a3e34b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -78,6 +78,7 @@ enum {
 	POS_FIX_VIACOMBO,
 	POS_FIX_COMBO,
 	POS_FIX_SKL,
+	POS_FIX_FIFO,
 };
 
 /* Defines for ATI HD Audio support in SB450 south bridge */
@@ -149,7 +150,7 @@ module_param_array(model, charp, NULL, 0444);
 MODULE_PARM_DESC(model, "Use the given board model.");
 module_param_array(position_fix, int, NULL, 0444);
 MODULE_PARM_DESC(position_fix, "DMA pointer read method."
-		 "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
+		 "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
 module_param_array(bdl_pos_adj, int, NULL, 0644);
 MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
 module_param_array(probe_mask, int, NULL, 0444);
@@ -350,6 +351,11 @@ enum {
 #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
 	(AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
 
+/* quirks for AMD SB */
+#define AZX_DCAPS_PRESET_AMD_SB \
+	(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
+	 AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
 	(AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
@@ -920,6 +926,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
 	return bound_pos + mod_dma_pos;
 }
 
+#define AMD_FIFO_SIZE	32
+
+/* get the current DMA position with FIFO size correction */
+static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
+{
+	struct snd_pcm_substream *substream = azx_dev->core.substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	unsigned int pos, delay;
+
+	pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
+	if (!runtime)
+		return pos;
+
+	runtime->delay = AMD_FIFO_SIZE;
+	delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
+	if (azx_dev->insufficient) {
+		if (pos < delay) {
+			delay = pos;
+			runtime->delay = bytes_to_frames(runtime, pos);
+		} else {
+			azx_dev->insufficient = 0;
+		}
+	}
+
+	/* correct the DMA position for capture stream */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (pos < delay)
+			pos += azx_dev->core.bufsize;
+		pos -= delay;
+	}
+
+	return pos;
+}
+
+static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
+				   unsigned int pos)
+{
+	struct snd_pcm_substream *substream = azx_dev->core.substream;
+
+	/* just read back the value calculated above */
+	return substream->runtime->delay;
+}
+
 static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
 					 struct azx_dev *azx_dev)
 {
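
azx_get_pos_fifo() compensates for the 32-byte FIFO between the DMA engine and the link: the reported pointer runs ahead of the samples actually delivered, and on capture the correction has to wrap around the ring buffer. A minimal sketch of the capture-side correction:

```c
#include <stdio.h>

#define FIFO_BYTES 32

/* Subtract the FIFO delay from the LPIB position, wrapping modulo the
 * ring buffer size, as the capture branch above does. */
static unsigned int capture_pos(unsigned int lpib_pos, unsigned int bufsize)
{
	unsigned int pos = lpib_pos;

	if (pos < FIFO_BYTES)
		pos += bufsize;   /* wrap backwards across the ring */
	pos -= FIFO_BYTES;
	return pos;
}

int main(void)
{
	/* pointer at byte 8 of a 4096-byte ring -> corrected to 4072 */
	printf("%u\n", capture_pos(8, 4096));
	return 0;
}
```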
@@ -1528,6 +1577,7 @@ static int check_position_fix(struct azx *chip, int fix)
 	case POS_FIX_VIACOMBO:
 	case POS_FIX_COMBO:
 	case POS_FIX_SKL:
+	case POS_FIX_FIFO:
 		return fix;
 	}
 
@@ -1544,6 +1594,10 @@ static int check_position_fix(struct azx *chip, int fix)
 		dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
 		return POS_FIX_VIACOMBO;
 	}
+	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
+		dev_dbg(chip->card->dev, "Using FIFO position fix\n");
+		return POS_FIX_FIFO;
+	}
 	if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
 		dev_dbg(chip->card->dev, "Using LPIB position fix\n");
 		return POS_FIX_LPIB;
@@ -1564,6 +1618,7 @@ static void assign_position_fix(struct azx *chip, int fix)
 		[POS_FIX_VIACOMBO] = azx_via_get_position,
 		[POS_FIX_COMBO] = azx_get_pos_lpib,
 		[POS_FIX_SKL] = azx_get_pos_skl,
+		[POS_FIX_FIFO] = azx_get_pos_fifo,
 	};
 
 	chip->get_position[0] = chip->get_position[1] = callbacks[fix];
@@ -1578,6 +1633,9 @@ static void assign_position_fix(struct azx *chip, int fix)
 			azx_get_delay_from_lpib;
 	}
 
+	if (fix == POS_FIX_FIFO)
+		chip->get_delay[0] = chip->get_delay[1] =
+			azx_get_delay_from_fifo;
 }
 
 /*
@@ -2594,6 +2652,12 @@ static const struct pci_device_id azx_ids[] = {
 	/* AMD Hudson */
 	{ PCI_DEVICE(0x1022, 0x780d),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+	/* AMD, X370 & co */
+	{ PCI_DEVICE(0x1022, 0x1457),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+	/* AMD, X570 & co */
+	{ PCI_DEVICE(0x1022, 0x1487),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
 	/* AMD Stoney */
 	{ PCI_DEVICE(0x1022, 0x157a),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index b70fbfa..6f17b25 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -176,23 +176,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;
 
-	switch (codec->core.vendor_id) {
-	case 0x14f12008: /* CX8200 */
-	case 0x14f150f2: /* CX20722 */
-	case 0x14f150f4: /* CX20724 */
-		break;
-	default:
-		return;
-	}
-
 	/* Turn the problematic codec into D3 to avoid spurious noises
 	   from the internal speaker during (and after) reboot */
 	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
-	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-	snd_hda_codec_write(codec, codec->core.afg, 0,
-			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-	msleep(10);
+	snd_hda_gen_reboot_notify(codec);
 }
 
 static void cx_auto_free(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dc19896..9b5caf0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -868,15 +868,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
 		alc_shutup(codec);
 }
 
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
-	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-	snd_hda_codec_write(codec, codec->core.afg, 0,
-			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-	msleep(10);
-}
-
 #define alc_free	snd_hda_gen_free
 
 #ifdef CONFIG_PM
@@ -5111,7 +5102,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
 	struct alc_spec *spec = codec->spec;
 
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-		spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+		spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
 		codec->power_save_node = 0; /* avoid click noises */
 		snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6851,6 +6842,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 160b276..6a8c279 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1150,6 +1150,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
 	return ret;
 }
 
+static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
+					    struct snd_pcm_hw_rule *rule)
+{
+	struct davinci_mcasp_ruledata *rd = rule->private;
+	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+	struct snd_mask nfmt;
+	int i, slot_width;
+
+	snd_mask_none(&nfmt);
+	slot_width = rd->mcasp->slot_width;
+
+	for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+		if (snd_mask_test(fmt, i)) {
+			if (snd_pcm_format_width(i) <= slot_width) {
+				snd_mask_set(&nfmt, i);
+			}
+		}
+	}
+
+	return snd_mask_refine(fmt, &nfmt);
+}
+
 static const unsigned int davinci_mcasp_dai_rates[] = {
 	8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
 	88200, 96000, 176400, 192000,
@@ -1257,7 +1279,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
 	struct davinci_mcasp_ruledata *ruledata =
 					&mcasp->ruledata[substream->stream];
 	u32 max_channels = 0;
-	int i, dir;
+	int i, dir, ret;
 	int tdm_slots = mcasp->tdm_slots;
 
 	/* Do not allow more than one stream per direction */
@@ -1286,6 +1308,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
 			max_channels++;
 	}
 	ruledata->serializers = max_channels;
+	ruledata->mcasp = mcasp;
 	max_channels *= tdm_slots;
 	/*
 	 * If the already active stream has fewer channels than the calculated
@@ -1311,20 +1334,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
 				   0, SNDRV_PCM_HW_PARAM_CHANNELS,
 				   &mcasp->chconstr[substream->stream]);
 
-	if (mcasp->slot_width)
-		snd_pcm_hw_constraint_minmax(substream->runtime,
-					     SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
-					     8, mcasp->slot_width);
+	if (mcasp->slot_width) {
+		/* Only allow formats that require <= slot_width bits on the bus */
+		ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+					  SNDRV_PCM_HW_PARAM_FORMAT,
+					  davinci_mcasp_hw_rule_slot_width,
+					  ruledata,
+					  SNDRV_PCM_HW_PARAM_FORMAT, -1);
+		if (ret)
+			return ret;
+	}
 
 	/*
 	 * If we rely on implicit BCLK divider setting we should
 	 * set constraints based on what we can provide.
 	 */
 	if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
-		int ret;
-
-		ruledata->mcasp = mcasp;
-
 		ret = snd_pcm_hw_rule_add(substream->runtime, 0,
 					  SNDRV_PCM_HW_PARAM_RATE,
 					  davinci_mcasp_hw_rule_rate,
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 60d43d5..11399f8 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -329,7 +329,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
 		val |= I2S_CHN_4;
 		break;
 	case 2:
-	case 1:
 		val |= I2S_CHN_2;
 		break;
 	default:
@@ -462,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
 	},
 	.capture = {
 		.stream_name = "Capture",
-		.channels_min = 1,
+		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_192000,
 		.formats = (SNDRV_PCM_FMTBIT_S8 |
@@ -662,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
 	}
 
 	if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
-		if (val >= 1 && val <= 8)
+		if (val >= 2 && val <= 8)
 			soc_dai->capture.channels_max = val;
 	}
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 727a1b4..eb99496 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1530,8 +1530,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
 		}
 	}
 
-	if (dai_link->dai_fmt)
-		snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
+	if (dai_link->dai_fmt) {
+		ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
+		if (ret)
+			return ret;
+	}
 
 	ret = soc_post_component_init(rtd, dai_link->name);
 	if (ret)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 91cc574..b9ad15f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1147,8 +1147,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
 		list_add_tail(&widget->work_list, list);
 
 	if (custom_stop_condition && custom_stop_condition(widget, dir)) {
-		widget->endpoints[dir] = 1;
-		return widget->endpoints[dir];
+		list = NULL;
+		custom_stop_condition = NULL;
 	}
 
 	if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
@@ -1185,8 +1185,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
  *
  * Optionally, can be supplied with a function acting as a stopping condition.
  * This function takes the dapm widget currently being examined and the walk
- * direction as an arguments, it should return true if the walk should be
- * stopped and false otherwise.
+ * direction as arguments; it should return true if widgets from that point
+ * in the graph onwards should not be added to the widget list.
  */
 static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
 	struct list_head *list,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index fa6e582..dc0c00a 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2669,8 +2669,7 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
 
 		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
 			continue;
 
 		dev_dbg(be->dev, "ASoC: prepare BE %s\n",
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 40ad000..dd64c4b 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -280,7 +280,8 @@ static int sound_insert_unit(struct sound_unit **list, const struct file_operati
 				goto retry;
 			}
 			spin_unlock(&sound_loader_lock);
-			return -EBUSY;
+			r = -EBUSY;
+			goto fail;
 		}
 	}
 
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index e1fbb9c..a197fc3 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -604,14 +604,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
 		ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
 				    hiface_pcm_out_urb_handler);
 		if (ret < 0)
-			return ret;
+			goto error;
 	}
 
 	ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
 	if (ret < 0) {
-		kfree(rt);
 		dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
-		return ret;
+		goto error;
 	}
 
 	pcm->private_data = rt;
@@ -624,4 +623,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
 
 	chip->pcm = rt;
 	return 0;
+
+error:
+	for (i = 0; i < PCM_N_URBS; i++)
+		kfree(rt->out_urbs[i].buffer);
+	kfree(rt);
+	return ret;
 }
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index e63a7d3..799d153 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -83,6 +83,7 @@ struct mixer_build {
 	unsigned char *buffer;
 	unsigned int buflen;
 	DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+	DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
 	struct usb_audio_term oterm;
 	const struct usbmix_name_map *map;
 	const struct usbmix_selector_map *selector_map;
@@ -759,6 +760,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 		return -EINVAL;
 	if (!desc->bNrInPins)
 		return -EINVAL;
+	if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+		return -EINVAL;
 
 	switch (state->mixer->protocol) {
 	case UAC_VERSION_1:
@@ -788,16 +791,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 * parse the source unit recursively until it reaches a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
 			    struct usb_audio_term *term)
 {
 	int protocol = state->mixer->protocol;
 	int err;
 	void *p1;
+	unsigned char *hdr;
 
 	memset(term, 0, sizeof(*term));
-	while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-		unsigned char *hdr = p1;
+	for (;;) {
+		/* a loop in the terminal chain? */
+		if (test_and_set_bit(id, state->termbitmap))
+			return -EINVAL;
+
+		p1 = find_audio_control_unit(state, id);
+		if (!p1)
+			break;
+
+		hdr = p1;
 		term->id = id;
 
 		if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -815,7 +827,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
 					/* call recursively to verify that the
 					 * referenced clock entity is valid */
-					err = check_input_term(state, d->bCSourceID, term);
+					err = __check_input_term(state, d->bCSourceID, term);
 					if (err < 0)
 						return err;
 
@@ -849,7 +861,7 @@ static int check_input_term(struct mixer_build *state, int id,
 			case UAC2_CLOCK_SELECTOR: {
 				struct uac_selector_unit_descriptor *d = p1;
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 				term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -912,7 +924,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
 				/* call recursively to verify that the
 				 * referenced clock entity is valid */
-				err = check_input_term(state, d->bCSourceID, term);
+				err = __check_input_term(state, d->bCSourceID, term);
 				if (err < 0)
 					return err;
 
@@ -963,7 +975,7 @@ static int check_input_term(struct mixer_build *state, int id,
 			case UAC3_CLOCK_SELECTOR: {
 				struct uac_selector_unit_descriptor *d = p1;
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 				term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -979,7 +991,7 @@ static int check_input_term(struct mixer_build *state, int id,
 					return -EINVAL;
 
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 
@@ -997,6 +1009,15 @@ static int check_input_term(struct mixer_build *state, int id,
 	return -ENODEV;
 }
 
+
+static int check_input_term(struct mixer_build *state, int id,
+			    struct usb_audio_term *term)
+{
+	memset(term, 0, sizeof(*term));
+	memset(state->termbitmap, 0, sizeof(state->termbitmap));
+	return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */
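
The termbitmap addition turns the unbounded while loop into a cycle-safe traversal: each unit id is marked with test_and_set_bit() before it is followed, so a malicious descriptor chain that points back at itself fails with -EINVAL instead of looping forever. A compact sketch of the same idea over a plain next-id array instead of USB descriptors:

```c
#include <stdio.h>
#include <string.h>

#define MAX_ID 256

/* Walk a chain of unit ids; a revisited id means the chain is circular. */
static int check_chain(const int *next, int id)
{
	unsigned char seen[MAX_ID] = { 0 };

	while (id >= 0) {
		if (seen[id])
			return -22; /* -EINVAL: a loop in the chain */
		seen[id] = 1;
		id = next[id];  /* -1 terminates the chain */
	}
	return 0;
}

int main(void)
{
	int next[MAX_ID];

	memset(next, -1, sizeof(next));  /* every byte 0xFF -> each int -1 */
	next[1] = 2; next[2] = 3;        /* 1 -> 2 -> 3: ok */
	printf("%d\n", check_chain(next, 1));
	next[3] = 1;                     /* 1 -> 2 -> 3 -> 1: loop */
	printf("%d\n", check_chain(next, 1));
	return 0;
}
```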
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index f6ce6d5..fa2cc4a 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -1058,6 +1058,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
 
 		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 		if (!pd) {
+			kfree(fp->chmap);
 			kfree(fp->rate_table);
 			kfree(fp);
 			return NULL;
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index a19690a..c8c86a0 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -6,8 +6,9 @@
 #include "machine.h"
 #include "api/fs/fs.h"
 #include "debug.h"
+#include "symbol.h"
 
-int arch__fix_module_text_start(u64 *start, const char *name)
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
 {
 	u64 m_start = *start;
 	char path[PATH_MAX];
@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
 	if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
 		pr_debug2("Using module %s start:%#lx\n", path, m_start);
 		*start = m_start;
+	} else {
+		/* Successfully read the module's text segment start address.
+		 * Calculate the difference between the module start address
+		 * in memory and the module text segment start address.
+		 * For example the module load address is 0x3ff8011b000
+		 * (from /proc/modules) and the module text segment start
+		 * address is 0x3ff8011b870 (from the file above).
+		 *
+		 * Adjust the module size and subtract the GOT table
+		 * size located at the beginning of the module.
+		 */
+		*size -= (*start - m_start);
 	}
 
 	return 0;
 }
+
+/* On s390 the kernel text segment starts at very low memory addresses,
+ * for example 0x10000. Modules are located at very high memory addresses,
+ * for example 0x3ff xxxx xxxx. The gap between the end of the kernel text
+ * segment and the beginning of the first module's text segment is very big.
+ * Therefore do not fill this gap and do not assign it to the kernel dso map.
+ */
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+	if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
+		/* Last kernel symbol mapped to end of page */
+		p->end = roundup(p->end, page_size);
+	else
+		p->end = c->start;
+	pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
+}
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index fa56fde..91c0a44 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
 
 	/* Allocate and initialize all memory on CPU#0: */
 	if (init_cpu0) {
-		orig_mask = bind_to_node(0);
-		bind_to_memnode(0);
+		int node = numa_node_of_cpu(0);
+
+		orig_mask = bind_to_node(node);
+		bind_to_memnode(node);
 	}
 
 	bytes = bytes0 + HPSIZE;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index f42f228..1379551 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -174,7 +174,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
 	int last_cpu;
 
 	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
-	mask_size = (last_cpu + 3) / 4 + 1;
+	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
 	mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
 
 	cpumask = malloc(mask_size);
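
The corrected mask_size accounts for one hex digit per 4 CPUs, a comma separating every 32-CPU group, and the terminating NUL; the old formula could undersize the buffer. A small check of the arithmetic:

```c
#include <stdio.h>

/* Sizing for a kernel cpumask string as fixed above: hex digits for
 * (last_cpu + 1) cpus, plus separators, plus the NUL terminator. */
static int cpumask_strlen(int last_cpu)
{
	int size = last_cpu / 4 + 2;  /* hex digits + NUL */

	size += last_cpu / 32;        /* ',' for every 32th cpu */
	return size;
}

int main(void)
{
	/* cpus 0..127: 32 hex digits + 3 commas + NUL -> 36 bytes */
	printf("%d\n", cpumask_strlen(127));
	return 0;
}
```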
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 99de916..0bdb34f 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -711,6 +711,16 @@ __cmd_probe(int argc, const char **argv)
 
 		ret = perf_add_probe_events(params.events, params.nevents);
 		if (ret < 0) {
+
+			/*
+			 * When perf_add_probe_events() fails it calls
+			 * cleanup_perf_probe_events(pevs, npevs), i.e.
+			 * cleanup_perf_probe_events(params.events, params.nevents), which
+			 * will call clear_perf_probe_event(), so set nevents to zero
+			 * to keep cleanup_params() from calling clear_perf_probe_event()
+			 * again on the same pevs.
+			 */
+			params.nevents = 0;
 			pr_err_with_code("  Error: Failed to add events.", ret);
 			return ret;
 		}
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 68c92bb..6b36b71 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -450,6 +450,7 @@ static struct fixed {
 	{ "inst_retired.any_p", "event=0xc0" },
 	{ "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
 	{ "cpu_clk_unhalted.thread", "event=0x3c" },
+	{ "cpu_clk_unhalted.core", "event=0x3c" },
 	{ "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
 	{ NULL, NULL},
 };
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 383674f..f93846e 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
 	unsigned char *bitmap;
 	int last_cpu = cpu_map__cpu(map, map->nr - 1);
 
-	bitmap = zalloc((last_cpu + 7) / 8);
+	if (buf == NULL)
+		return 0;
+
+	bitmap = zalloc(last_cpu / 8 + 1);
 	if (bitmap == NULL) {
 		buf[0] = '\0';
 		return 0;
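
Worked allocation arithmetic: storing bit last_cpu needs last_cpu / 8 + 1 bytes. For a hypothetical last_cpu of 8, the old (last_cpu + 7) / 8 evaluates to 1 byte, which only holds bits 0-7, while 8 / 8 + 1 = 2 bytes also holds bit 8; as with the ftrace fix above, the off-by-one appeared when last_cpu was an exact multiple of eight.
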
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7f2e3b1..54c34c1 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3285,6 +3285,13 @@ int perf_session__read_header(struct perf_session *session)
 			   data->file.path);
 	}
 
+	if (f_header.attr_size == 0) {
+		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+		       "Was the 'perf record' command properly terminated?\n",
+		       data->file.path);
+		return -EINVAL;
+	}
+
 	nr_attrs = f_header.attrs.size / f_header.attr_size;
 	lseek(fd, f_header.attrs.offset, SEEK_SET);
 
@@ -3365,7 +3372,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
 	size += sizeof(struct perf_event_header);
 	size += ids * sizeof(u64);
 
-	ev = malloc(size);
+	ev = zalloc(size);
 
 	if (ev == NULL)
 		return -ENOMEM;
@@ -3472,7 +3479,7 @@ int perf_event__process_feature(struct perf_tool *tool,
 		return 0;
 
 	ff.buf  = (void *)fe->data;
-	ff.size = event->header.size - sizeof(event->header);
+	ff.size = event->header.size - sizeof(*fe);
 	ff.ph = &session->header;
 
 	if (feat_ops[feat].process(&ff, NULL))
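
The attr_size check guards the division that immediately follows it (nr_attrs = f_header.attrs.size / f_header.attr_size): a perf.data file left behind by an interrupted perf record can carry a zeroed header, and without the check the tool would die on a division by zero instead of printing a usable error. The switch from malloc() to zalloc() likewise keeps uninitialized heap bytes out of the synthesized attr event, and ff.size now subtracts the full feature-event preamble (sizeof(*fe)) rather than just the generic header, so the feature payload length is no longer overstated.
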
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 076718a..003b70d 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1295,6 +1295,7 @@ static int machine__set_modules_path(struct machine *machine)
 	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
 }
 int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
+				u64 *size __maybe_unused,
 				const char *name __maybe_unused)
 {
 	return 0;
@@ -1306,7 +1307,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
 	struct machine *machine = arg;
 	struct map *map;
 
-	if (arch__fix_module_text_start(&start, name) < 0)
+	if (arch__fix_module_text_start(&start, &size, name) < 0)
 		return -1;
 
 	map = machine__findnew_module_map(machine, start, name);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index ebde3ea..6f37678 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -219,7 +219,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
 
 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
 					const char *filename);
-int arch__fix_module_text_start(u64 *start, const char *name);
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
 
 int machine__load_kallsyms(struct machine *machine, const char *filename);
 
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 0715f97..91404ba 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -86,6 +86,11 @@ static int prefix_underscores_count(const char *str)
 	return tail - str;
 }
 
+void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+	p->end = c->start;
+}
+
 const char * __weak arch__normalize_symbol_name(const char *name)
 {
 	return name;
@@ -212,7 +217,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 		curr = rb_entry(nd, struct symbol, rb_node);
 
 		if (prev->end == prev->start && prev->end != curr->start)
-			prev->end = curr->start;
+			arch__symbols__fixup_end(prev, curr);
 	}
 
 	/* Last entry */
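
arch__symbols__fixup_end() follows the tools' usual weak-default pattern: this generic definition is used unless an architecture (s390, earlier in this patch) provides a strong definition of the same name. A minimal sketch of the linkage mechanism, with hypothetical names:

	#define __weak __attribute__((weak))

	/* generic fallback; a strong definition elsewhere wins at link time */
	void __weak arch_fixup_end(long *end, long next_start)
	{
		*end = next_start;
	}
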
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index f25fae4..76ef2fa 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -349,6 +349,7 @@ const char *arch__normalize_symbol_name(const char *name);
 #define SYMBOL_A 0
 #define SYMBOL_B 1
 
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
 int arch__compare_symbol_names(const char *namea, const char *nameb);
 int arch__compare_symbol_names_n(const char *namea, const char *nameb,
 				 unsigned int n);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 56007a7..2c146d0 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -192,14 +192,24 @@ struct comm *thread__comm(const struct thread *thread)
 
 struct comm *thread__exec_comm(const struct thread *thread)
 {
-	struct comm *comm, *last = NULL;
+	struct comm *comm, *last = NULL, *second_last = NULL;
 
 	list_for_each_entry(comm, &thread->comm_list, list) {
 		if (comm->exec)
 			return comm;
+		second_last = last;
 		last = comm;
 	}
 
+	/*
+	 * A 'last' comm with no start time might be the parent's comm of a
+	 * synthesized thread (created while processing a synthesized fork
+	 * event). For a main thread that is almost certainly wrong, so
+	 * prefer a later comm in that case.
+	 */
+	if (second_last && !last->start && thread->pid_ == thread->tid)
+		return second_last;
+
 	return last;
 }
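
For reference, thread->comm_list is kept newest-first (new comms are added at the head), so 'last' in iteration order is the oldest comm, typically the one inherited from the parent when the fork was synthesized, and 'second_last' is the next-newer one that the check above prefers.
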
 
diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/sendmsg6_prog.c
index 5aeaa28..a680628 100644
--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/sendmsg6_prog.c
@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
 	}
 
 	/* Rewrite destination. */
-	if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
-	     ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
+	if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
 		ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
 		ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
 		ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
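
The destination's first four bytes are FA CE B0 0C in network order. On big-endian hardware (e.g. s390) user_ip6[0] reads back as 0xFACEB00C, so the old test's user_ip6[0] & 0xFFFF selected 0xB00C while bpf_htons(0xFACE) stayed 0xFACE and the match could never succeed; comparing the whole word against bpf_htonl(0xFACEB00C) works for either byte order.
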
diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
new file mode 100644
index 0000000..63ed533
--- /dev/null
+++ b/tools/testing/selftests/kvm/config
@@ -0,0 +1,3 @@
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
index cca2baa..a8d8e8b 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -93,18 +93,10 @@
 	ip route add vrf v$ol1 192.0.2.16/28 \
 	   nexthop dev g1a \
 	   nexthop dev g1b
-
-	tc qdisc add dev $ul1 clsact
-	tc filter add dev $ul1 egress pref 111 prot ipv4 \
-	   flower dst_ip 192.0.2.66 action pass
-	tc filter add dev $ul1 egress pref 222 prot ipv4 \
-	   flower dst_ip 192.0.2.82 action pass
 }
 
 sw1_destroy()
 {
-	tc qdisc del dev $ul1 clsact
-
 	ip route del vrf v$ol1 192.0.2.16/28
 
 	ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
@@ -139,10 +131,18 @@
 	ip route add vrf v$ol2 192.0.2.0/28 \
 	   nexthop dev g2a \
 	   nexthop dev g2b
+
+	tc qdisc add dev $ul2 clsact
+	tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
+	   flower vlan_id 111 action pass
+	tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
+	   flower vlan_id 222 action pass
 }
 
 sw2_destroy()
 {
+	tc qdisc del dev $ul2 clsact
+
 	ip route del vrf v$ol2 192.0.2.0/28
 
 	ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
@@ -187,12 +187,16 @@
 	sw1_create
 	sw2_create
 	h2_create
+
+	forwarding_enable
 }
 
 cleanup()
 {
 	pre_cleanup
 
+	forwarding_restore
+
 	h2_destroy
 	sw2_destroy
 	sw1_destroy
@@ -211,15 +215,15 @@
 	   nexthop dev g1a weight $weight1 \
 	   nexthop dev g1b weight $weight2
 
-	local t0_111=$(tc_rule_stats_get $ul1 111 egress)
-	local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+	local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
+	local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
 
 	ip vrf exec v$h1 \
 	   $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
 	       -d 1msec -t udp "sp=1024,dp=0-32768"
 
-	local t1_111=$(tc_rule_stats_get $ul1 111 egress)
-	local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+	local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
+	local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
 
 	local d111=$((t1_111 - t0_111))
 	local d222=$((t1_222 - t0_222))
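
The measurement moves from egress filters on $ul1, keyed on the tunnel endpoint addresses, to ingress filters on $ul2 keyed on the VLAN tag of each underlay leg (111 and 222), so each counter now reflects traffic that actually traversed its path; forwarding_enable/forwarding_restore additionally make sure IPv4 forwarding is active for the duration of the test.
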
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 02bac8a..d982650 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -338,6 +338,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
 	kvm_timer_schedule(vcpu);
+	/*
+	 * If we're about to block (most likely because we've just hit a
+	 * WFI), we need to sync back the state of the GIC CPU interface
+	 * so that we have the latest PMR and group enables. This ensures
+	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
+	 * whether we have pending interrupts.
+	 */
+	preempt_disable();
+	kvm_vgic_vmcr_sync(vcpu);
+	preempt_enable();
+
 	kvm_vgic_v4_enable_doorbell(vcpu);
 }
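
kvm_vgic_vmcr_sync(), introduced in the vgic hunks below, dispatches to vgic_v2_vmcr_sync() or vgic_v3_vmcr_sync(), which only re-read the VMCR; the full vgic_v2_put()/vgic_v3_put() paths are now built on the same helpers and additionally save the active-priority registers. The preempt_disable()/preempt_enable() pair around the call ensures the vcpu is not migrated mid-read, so the VMCR is sampled on the CPU where the vcpu's GIC state is currently loaded.
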
 
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892a..57281c1 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -495,10 +495,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
 		       kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
 
-void vgic_v2_put(struct kvm_vcpu *vcpu)
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
 	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
+}
+
+void vgic_v2_put(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	vgic_v2_vmcr_sync(vcpu);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 3f2350a..5c55995 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -674,12 +674,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 		__vgic_v3_activate_traps(vcpu);
 }
 
-void vgic_v3_put(struct kvm_vcpu *vcpu)
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
 	if (likely(cpu_if->vgic_sre))
 		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+}
+
+void vgic_v3_put(struct kvm_vcpu *vcpu)
+{
+	vgic_v3_vmcr_sync(vcpu);
 
 	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
 
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c5165e3..250cd72 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -902,6 +902,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
 		vgic_v3_put(vcpu);
 }
 
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		return;
+
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_vmcr_sync(vcpu);
+	else
+		vgic_v3_vmcr_sync(vcpu);
+}
+
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index a9002471..d5e4542 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -204,6 +204,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
 void vgic_v2_put(struct kvm_vcpu *vcpu);
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
 
 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -234,6 +235,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
 
 void vgic_v3_load(struct kvm_vcpu *vcpu);
 void vgic_v3_put(struct kvm_vcpu *vcpu);
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
 
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2b36a51..4a584a5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2317,6 +2317,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Unlike kvm_arch_vcpu_runnable, this function is called outside
+ * a vcpu_load/vcpu_put pair.  However, for most architectures
+ * kvm_arch_vcpu_runnable does not require vcpu_load.
+ */
+bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_runnable(vcpu);
+}
+
+static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	if (kvm_arch_dy_runnable(vcpu))
+		return true;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+#endif
+
+	return false;
+}
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
@@ -2346,7 +2369,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 				continue;
 			if (vcpu == me)
 				continue;
-			if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+			if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
 				continue;
 			if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
 				continue;
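
kvm_arch_dy_runnable() being __weak lets an architecture substitute a check that is cheap and safe without vcpu_load, while the fallback above simply reuses kvm_arch_vcpu_runnable(); vcpu_dy_runnable() additionally treats a completed async page fault as making the vcpu a worthwhile yield target, since delivering the completion will wake it anyway. A minimal sketch of a hypothetical override (illustrative only, not the actual x86 version):

	bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
	{
		/* consult only state that is safe to read without vcpu_load */
		return READ_ONCE(vcpu->preempted);
	}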