Merge "mhi: core: timeout if sync power up fails to enter mission mode"
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8923a10..4834753 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2510,8 +2510,8 @@
 			http://repo.or.cz/w/linux-2.6/mini2440.git
 
 	mitigations=
-			[X86,PPC,S390] Control optional mitigations for CPU
-			vulnerabilities.  This is a set of curated,
+			[X86,PPC,S390,ARM64] Control optional mitigations for
+			CPU vulnerabilities.  This is a set of curated,
 			arch-independent options, each of which is an
 			aggregation of existing arch-specific options.
 
@@ -2520,12 +2520,14 @@
 				improves system performance, but it may also
 				expose users to several CPU vulnerabilities.
 				Equivalent to: nopti [X86,PPC]
+					       kpti=0 [ARM64]
 					       nospectre_v1 [PPC]
 					       nobp=0 [S390]
 					       nospectre_v1 [X86]
-					       nospectre_v2 [X86,PPC,S390]
+					       nospectre_v2 [X86,PPC,S390,ARM64]
 					       spectre_v2_user=off [X86]
 					       spec_store_bypass_disable=off [X86,PPC]
+					       ssbd=force-off [ARM64]
 					       l1tf=off [X86]
 					       mds=off [X86]
 
@@ -2873,10 +2875,10 @@
 			(bounds check bypass). With this option data leaks
 			are possible in the system.
 
-	nospectre_v2	[X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
-			(indirect branch prediction) vulnerability. System may
-			allow data leaks with this option, which is equivalent
-			to spectre_v2=off.
+	nospectre_v2	[X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
+			the Spectre variant 2 (indirect branch prediction)
+			vulnerability. System may allow data leaks with this
+			option.
 
 	nospec_store_bypass_disable
 			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index d6aff2c..6feaffe 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -178,3 +178,7 @@
 HWCAP_FLAGM
 
     Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
+
+HWCAP_SSBS
+
+    Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
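
Userspace can probe for this capability through the auxiliary vector. A minimal
sketch, assuming a toolchain whose <asm/hwcap.h> may predate SSBS (hence the
fallback define; verify the value against the arm64 uapi header):

  #include <stdio.h>
  #include <sys/auxv.h>

  #ifndef HWCAP_SSBS
  #define HWCAP_SSBS (1UL << 28)   /* assumed bit; take it from <asm/hwcap.h> */
  #endif

  int main(void)
  {
          unsigned long hwcap = getauxval(AT_HWCAP);

          printf("SSBS: %s\n", (hwcap & HWCAP_SSBS) ? "present" : "absent");
          return 0;
  }
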
diff --git a/Documentation/usb/rio.txt b/Documentation/usb/rio.txt
deleted file mode 100644
index aee715a..0000000
--- a/Documentation/usb/rio.txt
+++ /dev/null
@@ -1,138 +0,0 @@
-Copyright (C) 1999, 2000 Bruce Tenison
-Portions Copyright (C) 1999, 2000 David Nelson
-Thanks to David Nelson for guidance and the usage of the scanner.txt
-and scanner.c files to model our driver and this informative file.
-
-Mar. 2, 2000
-
-CHANGES
-
-- Initial Revision
-
-
-OVERVIEW
-
-This README will address issues regarding how to configure the kernel
-to access a RIO 500 mp3 player.  
-Before I explain how to use this to access the Rio500 please be warned:
-
-W A R N I N G:
---------------
-
-Please note that this software is still under development.  The authors
-are in no way responsible for any damage that may occur, no matter how
-inconsequential.
-
-It seems that the Rio has a problem when sending .mp3 with low batteries.
-I suggest when the batteries are low and you want to transfer stuff that you
-replace it with a fresh one. In my case, what happened is I lost two 16kb
-blocks (they are no longer usable to store information to it). But I don't
-know if that's normal or not; it could simply be a problem with the flash 
-memory.
-
-In an extreme case, I left my Rio playing overnight and the batteries wore 
-down to nothing and appear to have corrupted the flash memory. My RIO 
-needed to be replaced as a result.  Diamond tech support is aware of the 
-problem.  Do NOT allow your batteries to wear down to nothing before 
-changing them.  It appears RIO 500 firmware does not handle low battery 
-power well at all. 
-
-On systems with OHCI controllers, the kernel OHCI code appears to have 
-power on problems with some chipsets.  If you are having problems 
-connecting to your RIO 500, try turning it on first and then plugging it 
-into the USB cable.  
-
-Contact information:
---------------------
-
-   The main page for the project is hosted at sourceforge.net in the following
-   URL: <http://rio500.sourceforge.net>. You can also go to the project's
-   sourceforge home page at: <http://sourceforge.net/projects/rio500/>.
-   There is also a mailing list: rio500-users@lists.sourceforge.net
-
-Authors:
--------
-
-Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith 
-Clayton <kclayton@jps.net> is incharge of the PPC port and making sure
-things work there. Bruce Tenison <btenison@dibbs.net> is adding support
-for .fon files and also does testing. The program will mostly sure be
-re-written and Pete Ikusz along with the rest will re-design it. I would
-also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided use 
-with some important information regarding the communication with the Rio.
-
-ADDITIONAL INFORMATION and Userspace tools
-
-http://rio500.sourceforge.net/
-
-
-REQUIREMENTS
-
-A host with a USB port.  Ideally, either a UHCI (Intel) or OHCI
-(Compaq and others) hardware port should work.
-
-A Linux development kernel (2.3.x) with USB support enabled or a
-backported version to linux-2.2.x.  See http://www.linux-usb.org for
-more information on accomplishing this.
-
-A Linux kernel with RIO 500 support enabled.
-
-'lspci' which is only needed to determine the type of USB hardware
-available in your machine.
-
-CONFIGURATION
-
-Using `lspci -v`, determine the type of USB hardware available.
-
-  If you see something like:
-
-    USB Controller: ......
-    Flags: .....
-    I/O ports at ....
-
-  Then you have a UHCI based controller.
-
-  If you see something like:
-
-     USB Controller: .....
-     Flags: ....
-     Memory at .....
-
-  Then you have a OHCI based controller.
-
-Using `make menuconfig` or your preferred method for configuring the
-kernel, select 'Support for USB', 'OHCI/UHCI' depending on your
-hardware (determined from the steps above), 'USB Diamond Rio500 support', and
-'Preliminary USB device filesystem'.  Compile and install the modules
-(you may need to execute `depmod -a` to update the module
-dependencies).
-
-Add a device for the USB rio500:
-  `mknod /dev/usb/rio500 c 180 64`
-
-Set appropriate permissions for /dev/usb/rio500 (don't forget about
-group and world permissions).  Both read and write permissions are
-required for proper operation.
-
-Load the appropriate modules (if compiled as modules):
-
-  OHCI:
-    modprobe usbcore
-    modprobe usb-ohci
-    modprobe rio500
-
-  UHCI:
-    modprobe usbcore
-    modprobe usb-uhci  (or uhci)
-    modprobe rio500
-
-That's it.  The Rio500 Utils at: http://rio500.sourceforge.net should
-be able to access the rio500.
-
-BUGS
-
-If you encounter any problems feel free to drop me an email.
-
-Bruce Tenison
-btenison@dibbs.net
-
diff --git a/MAINTAINERS b/MAINTAINERS
index 831aeac..923cd02 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15125,13 +15125,6 @@
 S:	Maintained
 F:	drivers/net/usb/dm9601.c
 
-USB DIAMOND RIO500 DRIVER
-M:	Cesar Miquel <miquel@df.uba.ar>
-L:	rio500-users@lists.sourceforge.net
-W:	http://rio500.sourceforge.net
-S:	Maintained
-F:	drivers/usb/misc/rio500*
-
 USB EHCI DRIVER
 M:	Alan Stern <stern@rowland.harvard.edu>
 L:	linux-usb@vger.kernel.org
diff --git a/Makefile b/Makefile
index a47861d..37589dd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 78
+SUBLEVEL = 81
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7574964..ded1724 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -10,6 +10,10 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+export DTC_FLAGS := -@
+endif
+
 LDFLAGS_vmlinux	:=-p --no-undefined -X --pic-veneer
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux	+= --be8
@@ -36,7 +40,7 @@
 endif
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
-KBUILD_CFLAGS	+=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
+KBUILD_CFLAGS  +=-fno-omit-frame-pointer $(call cc-option,-mapcs,) $(call cc-option,-mno-sched-prolog,)
 endif
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
@@ -314,6 +318,8 @@
 KBUILD_DTBS := dtbs
 endif
 
+DTSSUBDIR       := vendor/qcom
+
 all:	$(notdir $(KBUILD_IMAGE)) $(KBUILD_DTBS)
 
 
@@ -348,6 +354,7 @@
 
 dtbs: prepare scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts
+	$(foreach DIR, $(DTSSUBDIR), $(Q)$(MAKE) $(build)=$(boot)/dts/$(DIR) MACHINE=$(MACHINE) dtbs)
 
 dtbs_install:
 	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts
@@ -359,7 +366,10 @@
 endif
 
 zImage-dtb: vmlinux scripts dtbs
-	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) DTSSUBDIR=$(DTSSUBDIR) $(boot)/$@
+
+Image-dtb: vmlinux scripts dtbs
+	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) DTSSUBDIR=$(DTSSUBDIR) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 3e3199a..c7f62a8 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -33,10 +33,20 @@
 DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
 ifneq ($(DTB_NAMES),)
 DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
-else
-DTB_LIST := $(dtb-y)
-endif
 DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+else
+# Use the same approach as ARM64 to have dtb files appended
+# to the kernel image.
+# For DT overlay support there is currently no uniform list
+# of dtb files. There is only a uniform list for the overlay
+# dtbo files, which is managed by dtbo-y, and the dtb files
+# are derived from each dtbo file's dtbo-base. So a simple
+# approach is used here: just find all dtb files that were
+# generated during the build.
+# Note that the dtb obj directory is always cleaned at the
+# beginning of the kernel build.
+DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
+endif
 
 ifeq ($(CONFIG_XIP_KERNEL),y)
 
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d4b7c59..cf1e4f7 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -1142,6 +1142,8 @@
 				ti,hwmods = "dss_dispc";
 				clocks = <&disp_clk>;
 				clock-names = "fck";
+
+				max-memory-bandwidth = <230000000>;
 			};
 
 			rfbi: rfbi@4832a800 {
diff --git a/arch/arm/boot/dts/vendor/qcom/Makefile b/arch/arm/boot/dts/vendor/qcom/Makefile
new file mode 100644
index 0000000..541a6d1
--- /dev/null
+++ b/arch/arm/boot/dts/vendor/qcom/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+include $(srctree)/arch/arm64/boot/dts/vendor/qcom/Makefile
+$(obj)/%.dtb: $(src)/../../../../../arm64/boot/dts/vendor/qcom/%.dts FORCE
+	$(call if_changed_dep,dtc)
+
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+$(obj)/%.dtbo:$(src)/../../../../../arm64/boot/dts/vendor/qcom/%.dts FORCE
+	$(call if_changed_dep,dtc)
+	$(call if_changed,dtbo_verify)
+
+dtbs: $(addprefix $(obj)/,$(dtb-y)) $(addprefix $(obj)/,$(dtbo-y))
+else
+dtbs: $(addprefix $(obj)/,$(dtb-y))
+endif
+clean-files := *.dtb
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 5ae5b52..ef484c4 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -91,7 +91,6 @@
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
 CONFIG_MSDOS_FS=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 09e1672..0ba8df0 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -197,7 +197,6 @@
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 6bb506e..cc63d09 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -588,7 +588,6 @@
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 2afb359..bd71d5b 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -334,7 +334,6 @@
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 9ea82c1..3aff4ca 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -191,7 +191,6 @@
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 6821f12..8f36119 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -7,6 +7,7 @@
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
+#include <linux/dma-mapping-fast.h>
 #include <linux/kref.h>
 
 #define ARM_MAPPING_ERROR		(~(dma_addr_t)0x0)
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index 9ded7bf..3b8fe01 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -946,7 +946,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
 	.rev_offs	= 0x0000,
 	.sysc_offs	= 0x0010,
 	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			  SYSC_HAS_RESET_STATUS,
 	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
 			  SIDLE_SMART_WKUP),
 	.sysc_fields	= &omap_hwmod_sysc_type2,
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index ca03af8..ddf96ad 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -77,83 +77,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
 	return 0;
 }
 
-/*
- * This API is to be called during init to set the various voltage
- * domains to the voltage as per the opp table. Typically we boot up
- * at the nominal voltage. So this function finds out the rate of
- * the clock associated with the voltage domain, finds out the correct
- * opp entry and sets the voltage domain to the voltage specified
- * in the opp entry
- */
-static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
-					 const char *oh_name)
-{
-	struct voltagedomain *voltdm;
-	struct clk *clk;
-	struct dev_pm_opp *opp;
-	unsigned long freq, bootup_volt;
-	struct device *dev;
-
-	if (!vdd_name || !clk_name || !oh_name) {
-		pr_err("%s: invalid parameters\n", __func__);
-		goto exit;
-	}
-
-	if (!strncmp(oh_name, "mpu", 3))
-		/* 
-		 * All current OMAPs share voltage rail and clock
-		 * source, so CPU0 is used to represent the MPU-SS.
-		 */
-		dev = get_cpu_device(0);
-	else
-		dev = omap_device_get_by_hwmod_name(oh_name);
-
-	if (IS_ERR(dev)) {
-		pr_err("%s: Unable to get dev pointer for hwmod %s\n",
-			__func__, oh_name);
-		goto exit;
-	}
-
-	voltdm = voltdm_lookup(vdd_name);
-	if (!voltdm) {
-		pr_err("%s: unable to get vdd pointer for vdd_%s\n",
-			__func__, vdd_name);
-		goto exit;
-	}
-
-	clk =  clk_get(NULL, clk_name);
-	if (IS_ERR(clk)) {
-		pr_err("%s: unable to get clk %s\n", __func__, clk_name);
-		goto exit;
-	}
-
-	freq = clk_get_rate(clk);
-	clk_put(clk);
-
-	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-	if (IS_ERR(opp)) {
-		pr_err("%s: unable to find boot up OPP for vdd_%s\n",
-			__func__, vdd_name);
-		goto exit;
-	}
-
-	bootup_volt = dev_pm_opp_get_voltage(opp);
-	dev_pm_opp_put(opp);
-
-	if (!bootup_volt) {
-		pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
-		       __func__, vdd_name);
-		goto exit;
-	}
-
-	voltdm_scale(voltdm, bootup_volt);
-	return 0;
-
-exit:
-	pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
-	return -EINVAL;
-}
-
 #ifdef CONFIG_SUSPEND
 static int omap_pm_enter(suspend_state_t suspend_state)
 {
@@ -211,25 +134,6 @@ void omap_common_suspend_init(void *pm_suspend)
 }
 #endif /* CONFIG_SUSPEND */
 
-static void __init omap3_init_voltages(void)
-{
-	if (!soc_is_omap34xx())
-		return;
-
-	omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
-	omap2_set_init_voltage("core", "l3_ick", "l3_main");
-}
-
-static void __init omap4_init_voltages(void)
-{
-	if (!soc_is_omap44xx())
-		return;
-
-	omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
-	omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
-	omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
-}
-
 int __maybe_unused omap_pm_nop_init(void)
 {
 	return 0;
@@ -249,10 +153,6 @@ int __init omap2_common_pm_late_init(void)
 	omap4_twl_init();
 	omap_voltage_late_init();
 
-	/* Initialize the voltages */
-	omap3_init_voltages();
-	omap4_init_voltages();
-
 	/* Smartreflex device init */
 	omap_devinit_smartreflex();
 
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 46ed10a..a66a9ad1 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -27,4 +27,27 @@
 	bool "Enable support for MDM9615"
 	select CLKSRC_QCOM
 
+config ARCH_BENGAL
+	bool "Enable Support for Qualcomm Technologies, Inc. BENGAL"
+	select COMMON_CLK_QCOM
+	select CPU_V7
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
+	select PM_OPP
+	select SOC_BUS
+	select THERMAL_WRITABLE_TRIPS
+	select ARM_GIC_V3
+	select ARM_AMBA
+	select SPARSE_IRQ
+	select MULTI_IRQ_HANDLER
+	select HAVE_ARM_ARCH_TIMER
+	select COMMON_CLK
+	select PINCTRL_MSM
+	select MSM_PM if PM
+	select CPU_FREQ
+	select PM_DEVFREQ
+	help
+	  This enables support for the BENGAL chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 8211cf4..e6f0191 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -375,10 +375,10 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 			prot, caller);
 }
 
-static void __dma_free_remap(void *cpu_addr, size_t size)
+static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn)
 {
 	dma_common_free_remap(cpu_addr, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, no_warn);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
@@ -624,7 +624,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 {
 	if (want_vaddr) {
 		if (PageHighMem(page))
-			__dma_free_remap(cpu_addr, size);
+			__dma_free_remap(cpu_addr, size, true);
 		else
 			__dma_remap(page, size, PAGE_KERNEL);
 	}
@@ -716,7 +716,7 @@ static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
 static void remap_allocator_free(struct arm_dma_free_args *args)
 {
 	if (args->want_vaddr)
-		__dma_free_remap(args->cpu_addr, args->size);
+		__dma_free_remap(args->cpu_addr, args->size, false);
 
 	__dma_free_buffer(args->page, args->size);
 }
@@ -1648,7 +1648,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
 		dma_common_free_remap(cpu_addr, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, true);
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
index b4d7895..bc9a37b 100644
--- a/arch/arm/xen/efi.c
+++ b/arch/arm/xen/efi.c
@@ -31,7 +31,9 @@ void __init xen_efi_runtime_setup(void)
 	efi.get_variable             = xen_efi_get_variable;
 	efi.get_next_variable        = xen_efi_get_next_variable;
 	efi.set_variable             = xen_efi_set_variable;
+	efi.set_variable_nonblocking = xen_efi_set_variable;
 	efi.query_variable_info      = xen_efi_query_variable_info;
+	efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
 	efi.update_capsule           = xen_efi_update_capsule;
 	efi.query_capsule_caps       = xen_efi_query_capsule_caps;
 	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index bf0109c..a465396 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -85,6 +85,7 @@
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_IRQ_MULTI_HANDLER
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index 943f52c..713be11 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -30,6 +30,7 @@
 # CONFIG_FHANDLE is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
 # CONFIG_RSEQ is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
@@ -191,6 +192,7 @@
 CONFIG_NET_CLS_ACT=y
 CONFIG_VSOCKETS=y
 CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_BPF_JIT=y
 CONFIG_CFG80211=y
 # CONFIG_CFG80211_DEFAULT_PS is not set
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
index 3e05871..64d2f4f 100644
--- a/arch/arm64/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -71,6 +71,7 @@
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
@@ -91,6 +92,8 @@
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_CMA=y
 CONFIG_ZSMALLOC=y
@@ -370,6 +373,7 @@
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -378,6 +382,8 @@
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MULTITOUCH=y
 CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
@@ -391,13 +397,19 @@
 CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_USB_LINK_LAYER_TEST=y
 CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_QCOM_EMU_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_VBUS_DRAW=900
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
@@ -405,6 +417,8 @@
 CONFIG_USB_CONFIGFS_F_CCID=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
@@ -492,6 +506,7 @@
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_QMI=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index f364a6d..e1abfc1 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -75,6 +75,7 @@
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
@@ -382,6 +383,7 @@
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -390,6 +392,8 @@
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MULTITOUCH=y
 CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
@@ -403,13 +407,19 @@
 CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_USB_LINK_LAYER_TEST=y
 CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_QCOM_EMU_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_VBUS_DRAW=900
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
@@ -417,6 +427,8 @@
 CONFIG_USB_CONFIGFS_F_CCID=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
@@ -514,6 +526,7 @@
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_ICNSS_QMI=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 49a1a9f..d6410c5 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -619,6 +619,7 @@
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_ARM_QCOM_DEVFREQ_QOSLAT=y
+CONFIG_DEVFREQ_GOV_STATICMAP=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index cfd0228..11a2d53 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -585,6 +585,7 @@
 CONFIG_QCOM_QMI_RMNET=y
 CONFIG_QCOM_QMI_DFC=y
 CONFIG_RMNET_CTL=y
+CONFIG_RMNET_CTL_DEBUG=y
 CONFIG_QCOM_QMI_POWER_COLLAPSE=y
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
@@ -643,6 +644,7 @@
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_ARM_QCOM_DEVFREQ_QOSLAT=y
+CONFIG_DEVFREQ_GOV_STATICMAP=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 510f687..dda6e50 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -525,11 +525,7 @@ static inline int arm64_get_ssbd_state(void)
 #endif
 }
 
-#ifdef CONFIG_ARM64_SSBD
 void arm64_set_ssbd_mitigation(bool state);
-#else
-static inline void arm64_set_ssbd_mitigation(bool state) {}
-#endif
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 6abe400..367b2e0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -398,6 +398,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
 
+void __kvm_enable_ssbs(void);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long hyp_stack_ptr,
 				       unsigned long vector_ptr)
@@ -418,6 +420,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	 */
 	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+
+	/*
+	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
+	 * at EL2.
+	 */
+	if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
+	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+		kvm_call_hyp(__kvm_enable_ssbs);
+	}
 }
 
 static inline bool kvm_arch_check_sve_has_vhe(void)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 71730a1..cd250ec 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -19,9 +19,11 @@
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
 #include <linux/types.h>
+#include <linux/cpu.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -87,7 +89,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
 
 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -109,9 +110,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
-				      const char *hyp_vecs_start,
-				      const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+				    const char *hyp_vecs_start,
+				    const char *hyp_vecs_end)
 {
 	static DEFINE_SPINLOCK(bp_lock);
 	int cpu, slot = -1;
@@ -138,7 +139,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 #define __smccc_workaround_1_smc_start		NULL
 #define __smccc_workaround_1_smc_end		NULL
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
 				      const char *hyp_vecs_end)
 {
@@ -146,23 +147,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 }
 #endif	/* CONFIG_KVM_INDIRECT_VECTORS */
 
-static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
-				     bp_hardening_cb_t fn,
-				     const char *hyp_vecs_start,
-				     const char *hyp_vecs_end)
-{
-	u64 pfr0;
-
-	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return;
-
-	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
-		return;
-
-	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
 #include <uapi/linux/psci.h>
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
@@ -189,60 +173,83 @@ static void qcom_link_stack_sanitization(void)
 		     : "=&r" (tmp));
 }
 
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+static bool __nospectre_v2;
+static int __init parse_nospectre_v2(char *str)
+{
+	__nospectre_v2 = true;
+	return 0;
+}
+early_param("nospectre_v2", parse_nospectre_v2);
+
+/*
+ * -1: No workaround
+ *  0: No workaround required
+ *  1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
 {
 	bp_hardening_cb_t cb;
 	void *smccc_start, *smccc_end;
 	struct arm_smccc_res res;
 	u32 midr = read_cpuid_id();
 
-	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return;
-
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-		return;
+		return -1;
 
 	switch (psci_ops.conduit) {
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if ((int)res.a0 < 0)
-			return;
-		cb = call_hvc_arch_workaround_1;
-		/* This is a guest, no need to patch KVM vectors */
-		smccc_start = NULL;
-		smccc_end = NULL;
+		switch ((int)res.a0) {
+		case 1:
+			/* Firmware says we're just fine */
+			return 0;
+		case 0:
+			cb = call_hvc_arch_workaround_1;
+			/* This is a guest, no need to patch KVM vectors */
+			smccc_start = NULL;
+			smccc_end = NULL;
+			break;
+		default:
+			return -1;
+		}
 		break;
 
 	case PSCI_CONDUIT_SMC:
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if ((int)res.a0 < 0)
-			return;
-		cb = call_smc_arch_workaround_1;
-		smccc_start = __smccc_workaround_1_smc_start;
-		smccc_end = __smccc_workaround_1_smc_end;
+		switch ((int)res.a0) {
+		case 1:
+			/* Firmware says we're just fine */
+			return 0;
+		case 0:
+			cb = call_smc_arch_workaround_1;
+			smccc_start = __smccc_workaround_1_smc_start;
+			smccc_end = __smccc_workaround_1_smc_end;
+			break;
+		default:
+			return -1;
+		}
 		break;
 
 	default:
-		return;
+		return -1;
 	}
 
 	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
 		cb = qcom_link_stack_sanitization;
 
-	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
+		install_bp_hardening_cb(cb, smccc_start, smccc_end);
 
-	return;
+	return 1;
 }
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
-#ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
 	const char	*str;
@@ -312,6 +319,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 
 void arm64_set_ssbd_mitigation(bool state)
 {
+	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+		pr_info_once("SSBD disabled by kernel configuration\n");
+		return;
+	}
+
 	if (this_cpu_has_cap(ARM64_SSBS)) {
 		if (state)
 			asm volatile(SET_PSTATE_SSBS(0));
@@ -341,16 +353,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	struct arm_smccc_res res;
 	bool required = true;
 	s32 val;
+	bool this_cpu_safe = false;
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
+	if (cpu_mitigations_off())
+		ssbd_state = ARM64_SSBD_FORCE_DISABLE;
+
+	/* delay setting __ssb_safe until we get a firmware response */
+	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+		this_cpu_safe = true;
+
 	if (this_cpu_has_cap(ARM64_SSBS)) {
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		required = false;
 		goto out_printmsg;
 	}
 
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -367,6 +391,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	default:
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -375,14 +401,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	switch (val) {
 	case SMCCC_RET_NOT_SUPPORTED:
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 
+	/* machines with mixed mitigation requirements must not return this */
 	case SMCCC_RET_NOT_REQUIRED:
 		pr_info_once("%s mitigation not required\n", entry->desc);
 		ssbd_state = ARM64_SSBD_MITIGATED;
 		return false;
 
 	case SMCCC_RET_SUCCESS:
+		__ssb_safe = false;
 		required = true;
 		break;
 
@@ -392,6 +422,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	default:
 		WARN_ON(1);
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -431,7 +463,14 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	return required;
 }
-#endif	/* CONFIG_ARM64_SSBD */
+
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+	{},
+};
 
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -486,6 +525,10 @@ has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 	CAP_MIDR_RANGE_LIST(midr_list)
 
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+static bool __spectrev2_safe = true;
+
 /*
  * Generic helper for handling capabilties with multiple (match,enable) pairs
  * of call backs, sharing the same capability bit.
@@ -518,27 +561,87 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
 			caps->cpu_enable(caps);
 }
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
+ */
+static const struct midr_range spectre_v2_safe_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+	{ /* sentinel */ }
+};
 
 /*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
  */
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	int need_wa;
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	/* If the CPU has CSV2 set, we're safe */
+	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+						 ID_AA64PFR0_CSV2_SHIFT))
+		return false;
+
+	/* Alternatively, we have a list of unaffected CPUs */
+	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+		return false;
+
+	/* Fallback to firmware detection */
+	need_wa = detect_harden_bp_fw();
+	if (!need_wa)
+		return false;
+
+	__spectrev2_safe = false;
+
+	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+		__hardenbp_enab = false;
+		return false;
+	}
+
+	/* forced off */
+	if (__nospectre_v2 || cpu_mitigations_off()) {
+		pr_info_once("spectrev2 mitigation disabled by command line option\n");
+		__hardenbp_enab = false;
+		return false;
+	}
+
+	if (need_wa < 0) {
+		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+		__hardenbp_enab = false;
+	}
+
+	return (need_wa > 0);
+}
+
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
-	MIDR_ALL_VERSIONS(MIDR_KRYO2XX_GOLD),
 	{},
 };
 
-#endif
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+			 int scope)
+{
+	int i;
+
+	if (!is_affected_midr_range_list(entry, scope) ||
+	    !is_hyp_mode_available())
+		return false;
+
+	for_each_possible_cpu(i) {
+		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+			return true;
+	}
+
+	return false;
+}
 
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
@@ -731,13 +834,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(arm64_workaround_858921_cpus),
 	},
 #endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-		.cpu_enable = enable_smccc_arch_workaround_1,
-		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = check_branch_predictor,
 	},
-#endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 	{
 		.desc = "EL2 vector hardening",
@@ -745,14 +846,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 	},
 #endif
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypass Disable",
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
+		.midr_range_list = arm64_ssb_cpus,
 	},
-#endif
 #ifdef CONFIG_ARM64_ERRATUM_1188873
 	{
 		.desc = "ARM erratum 1188873",
@@ -768,6 +868,49 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.matches = has_cortex_a76_erratum_1463225,
 	},
 #endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+	{
+		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+		.matches = needs_tx2_tvm_workaround,
+	},
+#endif
 	{
 	}
 };
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	if (__spectrev2_safe)
+		return sprintf(buf, "Not affected\n");
+
+	if (__hardenbp_enab)
+		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (__ssb_safe)
+		return sprintf(buf, "Not affected\n");
+
+	switch (ssbd_state) {
+	case ARM64_SSBD_KERNEL:
+	case ARM64_SSBD_FORCE_ENABLE:
+		if (IS_ENABLED(CONFIG_ARM64_SSBD))
+			return sprintf(buf,
+			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
+	}
+
+	return sprintf(buf, "Vulnerable\n");
+}
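
The cpu_show_*() handlers added above (and cpu_show_meltdown() in the next
file) are surfaced through the generic sysfs vulnerabilities directory
registered by drivers/base/cpu.c. A minimal userspace sketch that dumps those
entries:

  #include <stdio.h>

  int main(void)
  {
          static const char * const files[] = {
                  "/sys/devices/system/cpu/vulnerabilities/spectre_v1",
                  "/sys/devices/system/cpu/vulnerabilities/spectre_v2",
                  "/sys/devices/system/cpu/vulnerabilities/spec_store_bypass",
                  "/sys/devices/system/cpu/vulnerabilities/meltdown",
          };
          char line[128];

          for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
                  FILE *f = fopen(files[i], "r");

                  if (!f)
                          continue;       /* entry absent on older kernels */
                  if (fgets(line, sizeof(line), f))
                          printf("%s: %s", files[i], line);
                  fclose(f);
          }
          return 0;
  }
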
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5d1fed8..d6fb0be 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -24,6 +24,7 @@
 #include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/cpu.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -889,7 +890,7 @@ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
 	return ctr & BIT(CTR_DIC_SHIFT);
 }
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -907,7 +908,17 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 		{ /* sentinel */ }
 	};
-	char const *str = "command line option";
+	char const *str = "kpti command line option";
+	bool meltdown_safe;
+
+	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+	/* Defer to CPU feature registers */
+	if (has_cpuid_feature(entry, scope))
+		meltdown_safe = true;
+
+	if (!meltdown_safe)
+		__meltdown_safe = false;
 
 	/*
 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -919,6 +930,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		__kpti_forced = -1;
 	}
 
+	/* Useful for KASLR robustness */
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+		if (!__kpti_forced) {
+			str = "KASLR";
+			__kpti_forced = 1;
+		}
+	}
+
+	if (cpu_mitigations_off() && !__kpti_forced) {
+		str = "mitigations=off";
+		__kpti_forced = -1;
+	}
+
+	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+		return false;
+	}
+
 	/* Forced? */
 	if (__kpti_forced) {
 		pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -926,18 +955,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		return __kpti_forced > 0;
 	}
 
-	/* Useful for KASLR robustness */
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return true;
-
-	/* Don't force KPTI for CPUs that are not vulnerable */
-	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
-		return false;
-
-	/* Defer to CPU feature registers */
-	return !has_cpuid_feature(entry, scope);
+	return !meltdown_safe;
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static void __nocfi
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
@@ -962,6 +983,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 
 	return;
 }
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 static int __init parse_kpti(char *str)
 {
@@ -975,7 +1002,6 @@ static int __init parse_kpti(char *str)
 	return 0;
 }
 early_param("kpti", parse_kpti);
-#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 static inline void __cpu_enable_hw_dbm(void)
@@ -1197,7 +1223,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64PFR0_EL0_SHIFT,
 		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 	},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	{
 		.desc = "Kernel page table isolation (KPTI)",
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1213,7 +1238,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = unmap_kernel_at_el0,
 		.cpu_enable = kpti_install_ng_mappings,
 	},
-#endif
 	{
 		/* FP/SIMD is not implemented */
 		.capability = ARM64_HAS_NO_FPSIMD,
@@ -1854,3 +1878,15 @@ void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 	/* Firmware may have left a deferred SError in this register. */
 	write_sysreg_s(0, SYS_DISR_EL1);
 }
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	if (__meltdown_safe)
+		return sprintf(buf, "Not affected\n");
+
+	if (arm64_kernel_unmapped_at_el0())
+		return sprintf(buf, "Mitigation: PTI\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c13674f..a031f04 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -364,22 +364,27 @@ void arch_release_task_struct(struct task_struct *tsk)
 	fpsimd_release_task(tsk);
 }
 
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied.  We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead.  This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	if (current->mm)
 		fpsimd_preserve_current_state();
 	*dst = *src;
 
+	/* We rely on the above assignment to initialize dst's thread_flags: */
+	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+	/*
+	 * Detach src's sve_state (if any) from dst so that it does not
+	 * get erroneously used or freed prematurely.  dst's sve_state
+	 * will be allocated on demand later on if dst uses SVE.
+	 * For consistency, also clear TIF_SVE here: this could be done
+	 * later in copy_process(), but to avoid tripping up future
+	 * maintainers it is best not to leave TIF_SVE and sve_state in
+	 * an inconsistent state, even temporarily.
+	 */
+	dst->thread.sve_state = NULL;
+	clear_tsk_thread_flag(dst, TIF_SVE);
+
 	return 0;
 }
 
@@ -393,13 +398,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
 	/*
-	 * Unalias p->thread.sve_state (if any) from the parent task
-	 * and disable discard SVE state for p:
-	 */
-	clear_tsk_thread_flag(p, TIF_SVE);
-	p->thread.sve_state = NULL;
-
-	/*
 	 * In case p was allocated the same task_struct pointer as some
 	 * other recently-exited task, make sure p is disassociated from
 	 * any cpu that may have run that now-exited task recently.
@@ -439,7 +437,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 			childregs->pstate |= PSR_UAO_BIT;
 
 		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);
 
 		p->thread.cpu_context.x19 = stack_start;
 		p->thread.cpu_context.x20 = stk_sz;
@@ -480,18 +478,30 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
 static void ssbs_thread_switch(struct task_struct *next)
 {
-	if (likely(!(next->flags & PF_KTHREAD)) &&
-	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
-	    !test_tsk_thread_flag(next, TIF_SSBD)) {
-		struct pt_regs *regs = task_pt_regs(next);
+	struct pt_regs *regs = task_pt_regs(next);
 
-		if (compat_user_mode(regs))
-			set_compat_ssbs_bit(regs);
-		else if (user_mode(regs))
-			set_ssbs_bit(regs);
-	}
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. idle task) so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/* If the mitigation is enabled, then we leave SSBS clear. */
+	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+	    test_tsk_thread_flag(next, TIF_SSBD))
+		return;
+
+	if (compat_user_mode(regs))
+		set_compat_ssbs_bit(regs);
+	else if (user_mode(regs))
+		set_ssbs_bit(regs);
 }
 
 /*
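
The TIF_SSBD flag consulted here is driven by the generic speculation-control
prctl() interface. A minimal sketch of a task opting in to Speculative Store
Bypass Disable for itself (the PR_SPEC_* constants come from linux/prctl.h and
require reasonably recent UAPI headers):

  #include <stdio.h>
  #include <sys/prctl.h>
  #include <linux/prctl.h>

  int main(void)
  {
          /* Ask for Speculative Store Bypass to be disabled for this task. */
          if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                    PR_SPEC_DISABLE, 0, 0))
                  perror("PR_SET_SPECULATION_CTRL");

          /* The return value is a bitmask of PR_SPEC_* state flags. */
          printf("ssb state: 0x%x\n",
                 prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
          return 0;
  }
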
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6219486..0211c3c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1666,19 +1666,20 @@ void syscall_trace_exit(struct pt_regs *regs)
 }
 
 /*
- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
- * We also take into account DIT (bit 24), which is not yet documented, and
- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
- * allocated an EL0 meaning in future.
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
+ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
+ * not described in ARM DDI 0487D.a.
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
+ * be allocated an EL0 meaning in future.
  * Userspace cannot use these until they have an architectural meaning.
  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
  * We also reserve IL for the kernel; SS is handled dynamically.
  */
 #define SPSR_EL1_AARCH64_RES0_BITS \
-	(GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
-	 GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
+	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+	 GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
 #define SPSR_EL1_AARCH32_RES0_BITS \
-	(GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
+	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
 
 static int valid_compat_regs(struct user_pt_regs *regs)
 {
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index cc48eb6..4996e75 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -359,17 +359,28 @@ void remove_cpu_topology(unsigned int cpu)
 }
 
 #ifdef CONFIG_ACPI
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+	/*
+	 * if the PPTT doesn't have thread information, assume a homogeneous
+	 * machine and return the current CPU's thread state.
+	 */
+	if (is_threaded < 0)
+		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+
+	return !!is_threaded;
+}
+
 /*
  * Propagate the topology information of the processor_topology_node tree to the
  * cpu_topology array.
  */
 static int __init parse_acpi_topology(void)
 {
-	bool is_threaded;
 	int cpu, topology_id;
 
-	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
-
 	for_each_possible_cpu(cpu) {
 		int i, cache_id;
 
@@ -377,7 +388,7 @@ static int __init parse_acpi_topology(void)
 		if (topology_id < 0)
 			return topology_id;
 
-		if (is_threaded) {
+		if (acpi_cpu_is_threaded(cpu)) {
 			cpu_topology[cpu].thread_id = topology_id;
 			topology_id = find_acpi_cpu_topology(cpu, 1);
 			cpu_topology[cpu].core_id   = topology_id;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 74e469f..4dae476 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -68,6 +68,10 @@
 #define TRAMP_TEXT
 #endif
 
+#define RTIC_BSS					\
+	. = ALIGN(PAGE_SIZE);				\
+	KEEP(*(.bss.rtic));			\
+	. = ALIGN(PAGE_SIZE);				\
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -239,6 +243,10 @@
 	STABS_DEBUG
 
 	HEAD_SYMBOLS
+
+	.bss : {			/* bss segment		*/
+	RTIC_BSS
+	}
 }
 
 /*
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 963d669..7414b76 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -293,3 +293,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.sysregs_loaded_on_cpu = false;
 }
+
+void __hyp_text __kvm_enable_ssbs(void)
+{
+	u64 tmp;
+
+	asm volatile(
+	"mrs	%0, sctlr_el2\n"
+	"orr	%0, %0, %1\n"
+	"msr	sctlr_el2, %0"
+	: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
+}
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 7f0258e..dd6b600 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -949,3 +949,25 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 					   tmp : orig_prog);
 	return prog;
 }
+
+#ifdef CONFIG_CFI_CLANG
+bool arch_bpf_jit_check_func(const struct bpf_prog *prog)
+{
+	const uintptr_t func = (const uintptr_t)prog->bpf_func;
+
+	/*
+	 * bpf_func must be correctly aligned and within the correct region.
+	 * module_alloc places JIT code in the module region, unless
+	 * ARM64_MODULE_PLTS is enabled, in which case we might end up using
+	 * the vmalloc region too.
+	 */
+	if (unlikely(!IS_ALIGNED(func, sizeof(u32))))
+		return false;
+
+	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+			is_vmalloc_addr(prog->bpf_func))
+		return true;
+
+	return (func >= MODULES_VADDR && func < MODULES_END);
+}
+#endif
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index 2bae201..1c7bf11 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -99,7 +99,7 @@
 
 			miscintc: interrupt-controller@18060010 {
 				compatible = "qca,ar7240-misc-intc";
-				reg = <0x18060010 0x4>;
+				reg = <0x18060010 0x8>;
 
 				interrupt-parent = <&cpuintc>;
 				interrupts = <6>;
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index c3d0d0a..6895430 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -623,7 +623,6 @@
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 5f71aa5..1a3e1fe 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -335,7 +335,6 @@
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 0edba3e..4e2ee74 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -387,6 +387,22 @@
 #define cpu_has_dsp3		__ase(MIPS_ASE_DSP3)
 #endif
 
+#ifndef cpu_has_loongson_mmi
+#define cpu_has_loongson_mmi		__ase(MIPS_ASE_LOONGSON_MMI)
+#endif
+
+#ifndef cpu_has_loongson_cam
+#define cpu_has_loongson_cam		__ase(MIPS_ASE_LOONGSON_CAM)
+#endif
+
+#ifndef cpu_has_loongson_ext
+#define cpu_has_loongson_ext		__ase(MIPS_ASE_LOONGSON_EXT)
+#endif
+
+#ifndef cpu_has_loongson_ext2
+#define cpu_has_loongson_ext2		__ase(MIPS_ASE_LOONGSON_EXT2)
+#endif
+
 #ifndef cpu_has_mipsmt
 #define cpu_has_mipsmt		__isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
 #endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index dacbdb8..2b4b14a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -436,5 +436,9 @@ enum cpu_type_enum {
 #define MIPS_ASE_MSA		0x00000100 /* MIPS SIMD Architecture */
 #define MIPS_ASE_DSP3		0x00000200 /* Signal Processing ASE Rev 3*/
 #define MIPS_ASE_MIPS16E2	0x00000400 /* MIPS16e2 */
+#define MIPS_ASE_LOONGSON_MMI	0x00000800 /* Loongson MultiMedia extensions Instructions */
+#define MIPS_ASE_LOONGSON_CAM	0x00001000 /* Loongson CAM */
+#define MIPS_ASE_LOONGSON_EXT	0x00002000 /* Loongson EXTensions */
+#define MIPS_ASE_LOONGSON_EXT2	0x00004000 /* Loongson EXTensions R2 */
 
 #endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
index a2aba4b..1ade1da 100644
--- a/arch/mips/include/uapi/asm/hwcap.h
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -6,5 +6,16 @@
 #define HWCAP_MIPS_R6		(1 << 0)
 #define HWCAP_MIPS_MSA		(1 << 1)
 #define HWCAP_MIPS_CRC32	(1 << 2)
+#define HWCAP_MIPS_MIPS16	(1 << 3)
+#define HWCAP_MIPS_MDMX     (1 << 4)
+#define HWCAP_MIPS_MIPS3D   (1 << 5)
+#define HWCAP_MIPS_SMARTMIPS (1 << 6)
+#define HWCAP_MIPS_DSP      (1 << 7)
+#define HWCAP_MIPS_DSP2     (1 << 8)
+#define HWCAP_MIPS_DSP3     (1 << 9)
+#define HWCAP_MIPS_MIPS16E2 (1 << 10)
+#define HWCAP_LOONGSON_MMI  (1 << 11)
+#define HWCAP_LOONGSON_EXT  (1 << 12)
+#define HWCAP_LOONGSON_EXT2 (1 << 13)
 
 #endif /* _UAPI_ASM_HWCAP_H */
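
With these bits exported, user space can test for the Loongson extensions through the auxiliary vector rather than parsing /proc/cpuinfo. A small example using glibc's getauxval(); the constants simply mirror the values added to hwcap.h above:

#include <stdio.h>
#include <sys/auxv.h>

/* Mirrors the bit values added to uapi/asm/hwcap.h above. */
#define HWCAP_LOONGSON_MMI  (1 << 11)
#define HWCAP_LOONGSON_EXT  (1 << 12)
#define HWCAP_LOONGSON_EXT2 (1 << 13)

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("MMI:  %s\n", (hwcap & HWCAP_LOONGSON_MMI)  ? "yes" : "no");
	printf("EXT:  %s\n", (hwcap & HWCAP_LOONGSON_EXT)  ? "yes" : "no");
	printf("EXT2: %s\n", (hwcap & HWCAP_LOONGSON_EXT2) ? "yes" : "no");
	return 0;
}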
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 25cd873..581defb 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1489,6 +1489,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 			__cpu_name[cpu] = "ICT Loongson-3";
 			set_elf_platform(cpu, "loongson3a");
 			set_isa(c, MIPS_CPU_ISA_M64R1);
+			c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+				MIPS_ASE_LOONGSON_EXT);
 			break;
 		case PRID_REV_LOONGSON3B_R1:
 		case PRID_REV_LOONGSON3B_R2:
@@ -1496,6 +1498,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 			__cpu_name[cpu] = "ICT Loongson-3";
 			set_elf_platform(cpu, "loongson3b");
 			set_isa(c, MIPS_CPU_ISA_M64R1);
+			c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+				MIPS_ASE_LOONGSON_EXT);
 			break;
 		}
 
@@ -1861,6 +1865,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		decode_configs(c);
 		c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
 		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+			MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
 		break;
 	default:
 		panic("Unknown Loongson Processor ID!");
@@ -2099,6 +2105,39 @@ void cpu_probe(void)
 		elf_hwcap |= HWCAP_MIPS_MSA;
 	}
 
+	if (cpu_has_mips16)
+		elf_hwcap |= HWCAP_MIPS_MIPS16;
+
+	if (cpu_has_mdmx)
+		elf_hwcap |= HWCAP_MIPS_MDMX;
+
+	if (cpu_has_mips3d)
+		elf_hwcap |= HWCAP_MIPS_MIPS3D;
+
+	if (cpu_has_smartmips)
+		elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
+
+	if (cpu_has_dsp)
+		elf_hwcap |= HWCAP_MIPS_DSP;
+
+	if (cpu_has_dsp2)
+		elf_hwcap |= HWCAP_MIPS_DSP2;
+
+	if (cpu_has_dsp3)
+		elf_hwcap |= HWCAP_MIPS_DSP3;
+
+	if (cpu_has_mips16e2)
+		elf_hwcap |= HWCAP_MIPS_MIPS16E2;
+
+	if (cpu_has_loongson_mmi)
+		elf_hwcap |= HWCAP_LOONGSON_MMI;
+
+	if (cpu_has_loongson_ext)
+		elf_hwcap |= HWCAP_LOONGSON_EXT;
+
+	if (cpu_has_loongson_ext2)
+		elf_hwcap |= HWCAP_LOONGSON_EXT2;
+
 	if (cpu_has_vz)
 		cpu_probe_vz(c);
 
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index b2de408..f8d3671 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (cpu_has_eva)	seq_printf(m, "%s", " eva");
 	if (cpu_has_htw)	seq_printf(m, "%s", " htw");
 	if (cpu_has_xpa)	seq_printf(m, "%s", " xpa");
+	if (cpu_has_loongson_mmi)	seq_printf(m, "%s", " loongson-mmi");
+	if (cpu_has_loongson_cam)	seq_printf(m, "%s", " loongson-cam");
+	if (cpu_has_loongson_ext)	seq_printf(m, "%s", " loongson-ext");
+	if (cpu_has_loongson_ext2)	seq_printf(m, "%s", " loongson-ext2");
 	seq_printf(m, "\n");
 
 	if (cpu_has_mmips) {
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index 0fce460..12abf14 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -43,6 +43,10 @@
       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
 endif
 
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
 #
 # Loongson Machines' Support
 #
diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
index ffefc1c..98c3a7f 100644
--- a/arch/mips/loongson64/common/serial.c
+++ b/arch/mips/loongson64/common/serial.c
@@ -110,7 +110,7 @@ static int __init serial_init(void)
 }
 module_init(serial_init);
 
-static void __init serial_exit(void)
+static void __exit serial_exit(void)
 {
 	platform_device_unregister(&uart8250_device);
 }
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 355f8ea..3944c49 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -654,6 +654,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 				   int restore_scratch)
 {
 	if (restore_scratch) {
+		/*
+		 * Ensure the MFC0 below observes the value written to the
+		 * KScratch register by the prior MTC0.
+		 */
+		if (scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
 			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -668,12 +675,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 			uasm_il_b(p, r, lid);
 		}
-		if (scratch_reg >= 0) {
-			uasm_i_ehb(p);
+		if (scratch_reg >= 0)
 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-		} else {
+		else
 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-		}
 	} else {
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
@@ -922,6 +927,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	}
 	if (mode != not_refill && check_for_high_segbits) {
 		uasm_l_large_segbits_fault(l, *p);
+
+		if (mode == refill_scratch && scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/*
 		 * We get here if we are an xsseg address, or if we are
 		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -938,12 +947,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		uasm_i_jr(p, ptr);
 
 		if (mode == refill_scratch) {
-			if (scratch_reg >= 0) {
-				uasm_i_ehb(p);
+			if (scratch_reg >= 0)
 				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-			} else {
+			else
 				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-			}
 		} else {
 			uasm_i_nop(p);
 		}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 6f10312..c99fa1c 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -9,6 +9,7 @@
 	$(filter -mmicromips,$(KBUILD_CFLAGS)) \
 	$(filter -march=%,$(KBUILD_CFLAGS)) \
 	$(filter -m%-float,$(KBUILD_CFLAGS)) \
+	$(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
 	-D__VDSO__
 
 ifeq ($(cc-name),clang)
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 92a9b5f..f29f682 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -3,7 +3,7 @@
  * arch/parisc/mm/ioremap.c
  *
  * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
  * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
  */
 
@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	addr = (void __iomem *) area->addr;
 	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
 			       phys_addr, pgprot)) {
-		vfree(addr);
+		vunmap(addr);
 		return NULL;
 	}
 
@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 }
 EXPORT_SYMBOL(__ioremap);
 
-void iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *io_addr)
 {
-	if (addr > high_memory)
-		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+	if (is_vmalloc_addr((void *)addr))
+		vunmap((void *)addr);
 }
 EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 29f49a3..6a6804c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -212,7 +212,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_POWER9_DD2_1		LONG_ASM_CONST(0x0000080000000000)
 #define CPU_FTR_P9_TM_HV_ASSIST		LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_P9_TM_XER_SO_BUG	LONG_ASM_CONST(0x0000200000000000)
-#define CPU_FTR_P9_TLBIE_BUG		LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_P9_TLBIE_STQ_BUG	LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_P9_TIDR			LONG_ASM_CONST(0x0000800000000000)
 
 #ifndef __ASSEMBLY__
@@ -460,7 +460,7 @@ static inline void cpu_feature_keys_init(void) { }
 	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
 	    CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
-	    CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
+	    CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TIDR)
 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index f432054..f3b8e04 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -694,9 +694,35 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
 	return true;
 }
 
+/*
+ * Handle POWER9 broadcast tlbie invalidation issue using
+ * cpu feature flag.
+ */
+static __init void update_tlbie_feature_flag(unsigned long pvr)
+{
+	if (PVR_VER(pvr) == PVR_POWER9) {
+		/*
+		 * Set the tlbie feature flag for anything below
+		 * Nimbus DD 2.3 and Cumulus DD 1.3
+		 */
+		if ((pvr & 0xe000) == 0) {
+			/* Nimbus */
+			if ((pvr & 0xfff) < 0x203)
+				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+		} else if ((pvr & 0xc000) == 0) {
+			/* Cumulus */
+			if ((pvr & 0xfff) < 0x103)
+				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+		} else {
+			WARN_ONCE(1, "Unknown PVR");
+			cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+		}
+	}
+}
+
 static __init void cpufeatures_cpu_quirks(void)
 {
-	int version = mfspr(SPRN_PVR);
+	unsigned long version = mfspr(SPRN_PVR);
 
 	/*
 	 * Not all quirks can be derived from the cpufeatures device tree.
@@ -715,10 +741,10 @@ static __init void cpufeatures_cpu_quirks(void)
 
 	if ((version & 0xffff0000) == 0x004e0000) {
 		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
-		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
 		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
 	}
 
+	update_tlbie_feature_flag(version);
 	/*
 	 * PKEY was not in the initial base or feature node
 	 * specification, but it should become optional in the next
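
The revision decode in update_tlbie_feature_flag() is plain mask arithmetic: the low 12 bits of the PVR carry the DD level (0x203 is DD2.3), and the 0xe000/0xc000 masks separate Nimbus from Cumulus parts. A hedged stand-alone restatement, omitting the PVR_VER()/PVR_POWER9 check and using illustrative PVR values:

#include <stdio.h>

/* Mirrors the checks in update_tlbie_feature_flag(); the surrounding
 * PVR_VER() == PVR_POWER9 test is assumed to have passed already.
 */
static int p9_needs_tlbie_stq_workaround(unsigned long pvr)
{
	unsigned int dd = pvr & 0xfff;		/* DD level, e.g. 0x203 == DD2.3 */

	if ((pvr & 0xe000) == 0)		/* Nimbus */
		return dd < 0x203;
	if ((pvr & 0xc000) == 0)		/* Cumulus */
		return dd < 0x103;
	return 1;				/* unknown part: stay conservative */
}

int main(void)
{
	printf("Nimbus DD2.2: %d\n", p9_needs_tlbie_stq_workaround(0x0202));
	printf("Nimbus DD2.3: %d\n", p9_needs_tlbie_stq_workaround(0x0203));
	return 0;
}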
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index efdd16a..93e0677 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
 					mce_ue_event_queue);
 
 static void machine_check_process_queued_event(struct irq_work *work);
+static void machine_check_ue_irq_work(struct irq_work *work);
 void machine_check_ue_event(struct machine_check_event *evt);
 static void machine_process_ue_event(struct work_struct *work);
 
@@ -52,6 +53,10 @@ static struct irq_work mce_event_process_work = {
         .func = machine_check_process_queued_event,
 };
 
+static struct irq_work mce_ue_event_irq_work = {
+	.func = machine_check_ue_irq_work,
+};
+
 DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
 
 static void mce_set_error_info(struct machine_check_event *mce,
@@ -208,6 +213,10 @@ void release_mce_event(void)
 	get_mce_event(NULL, true);
 }
 
+static void machine_check_ue_irq_work(struct irq_work *work)
+{
+	schedule_work(&mce_ue_event_work);
+}
 
 /*
  * Queue up the MCE event which then can be handled later.
@@ -225,7 +234,7 @@ void machine_check_ue_event(struct machine_check_event *evt)
 	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
 
 	/* Queue work to process this event later. */
-	schedule_work(&mce_ue_event_work);
+	irq_work_queue(&mce_ue_event_irq_work);
 }
 
 /*
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 3022d67..37a110b 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -39,6 +39,7 @@
 static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 {
 	pte_t *ptep;
+	unsigned int shift;
 	unsigned long flags;
 	struct mm_struct *mm;
 
@@ -48,13 +49,18 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 		mm = &init_mm;
 
 	local_irq_save(flags);
-	if (mm == current->mm)
-		ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
-	else
-		ptep = find_init_mm_pte(addr, NULL);
+	ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
 	local_irq_restore(flags);
+
 	if (!ptep || pte_special(*ptep))
 		return ULONG_MAX;
+
+	if (shift > PAGE_SHIFT) {
+		unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+
+		return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
+	}
+
 	return pte_pfn(*ptep);
 }
 
@@ -339,7 +345,7 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
   MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
 { 0, false, 0, 0, 0, 0 } };
 
-static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
+static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
 					uint64_t *phys_addr)
 {
 	/*
@@ -530,7 +536,8 @@ static int mce_handle_derror(struct pt_regs *regs,
 			 * kernel/exception-64s.h
 			 */
 			if (get_paca()->in_mce < MAX_MCE_DEPTH)
-				mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
+				mce_find_instr_ea_and_phys(regs, addr,
+							   phys_addr);
 		}
 		found = 1;
 	}
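
The new hugepage branch in addr_to_pfn() folds the offset inside the large page into the returned pfn: the PTE supplies the base, and address bits below the mapping shift but above PAGE_SHIFT select the 4K page within it. The same arithmetic as a stand-alone sketch; the real code ORs the offset bits into the pte and lets pte_pfn() extract the result, which is equivalent because the base pfn of a huge page is naturally aligned:

#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* base_pfn: pfn of the start of the huge mapping (from the PTE)
 * addr:     faulting effective address
 * shift:    mapping size shift, e.g. 24 for a 16MB page
 */
static unsigned long hugepage_addr_to_pfn(unsigned long base_pfn,
					  unsigned long addr,
					  unsigned int shift)
{
	unsigned long rpnmask = (1UL << shift) - PAGE_SIZE;

	return base_pfn + ((addr & rpnmask) >> PAGE_SHIFT);
}

int main(void)
{
	/* Illustrative values only. */
	unsigned long pfn = hugepage_addr_to_pfn(0x100000UL, 0x3f456789UL, 24);

	printf("pfn = 0x%lx\n", pfn);	/* 0x100000 + 0x456 = 0x100456 */
	return 0;
}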
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 05b32cc..3ae3e8d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1407,7 +1407,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		*val = get_reg_val(id, vcpu->arch.pspb);
 		break;
 	case KVM_REG_PPC_DPDES:
-		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
+		/*
+		 * On POWER9, where we are emulating msgsndp etc.,
+		 * we return 1 bit for each vcpu, which can come from
+		 * either vcore->dpdes or doorbell_request.
+		 * On POWER8, doorbell_request is 0.
+		 */
+		*val = get_reg_val(id, vcpu->arch.vcore->dpdes |
+				   vcpu->arch.doorbell_request);
 		break;
 	case KVM_REG_PPC_VTB:
 		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
@@ -2550,7 +2557,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 		if (!spin_trylock(&pvc->lock))
 			continue;
 		prepare_threads(pvc);
-		if (!pvc->n_runnable) {
+		if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
 			list_del_init(&pvc->preempt_list);
 			if (pvc->runner == NULL) {
 				pvc->vcore_state = VCORE_INACTIVE;
@@ -2571,15 +2578,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 	spin_unlock(&lp->lock);
 }
 
-static bool recheck_signals(struct core_info *cip)
+static bool recheck_signals_and_mmu(struct core_info *cip)
 {
 	int sub, i;
 	struct kvm_vcpu *vcpu;
+	struct kvmppc_vcore *vc;
 
-	for (sub = 0; sub < cip->n_subcores; ++sub)
-		for_each_runnable_thread(i, vcpu, cip->vc[sub])
+	for (sub = 0; sub < cip->n_subcores; ++sub) {
+		vc = cip->vc[sub];
+		if (!vc->kvm->arch.mmu_ready)
+			return true;
+		for_each_runnable_thread(i, vcpu, vc)
 			if (signal_pending(vcpu->arch.run_task))
 				return true;
+	}
 	return false;
 }
 
@@ -2800,7 +2812,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	local_irq_disable();
 	hard_irq_disable();
 	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
+	    recheck_signals_and_mmu(&core_info)) {
 		local_irq_enable();
 		vc->vcore_state = VCORE_INACTIVE;
 		/* Unlock all except the primary vcore */
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index a67cf1c..7c68d83 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -452,7 +452,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
 		}
 
-		if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+		if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
 			/*
 			 * Need the extra ptesync to make sure we don't
 			 * re-order the tlbie
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 68c7591..f1878e1 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2903,29 +2903,39 @@
 kvm_cede_exit:
 	ld	r9, HSTATE_KVM_VCPU(r13)
 #ifdef CONFIG_KVM_XICS
-	/* Abort if we still have a pending escalation */
-	lbz	r5, VCPU_XIVE_ESC_ON(r9)
-	cmpwi	r5, 0
-	beq	1f
-	li	r0, 0
-	stb	r0, VCPU_CEDED(r9)
-1:	/* Enable XIVE escalation */
-	li	r5, XIVE_ESB_SET_PQ_00
-	mfmsr	r0
-	andi.	r0, r0, MSR_DR		/* in real mode? */
-	beq	1f
+	/* are we using XIVE with single escalation? */
 	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
 	cmpdi	r10, 0
 	beq	3f
-	ldx	r0, r10, r5
+	li	r6, XIVE_ESB_SET_PQ_00
+	/*
+	 * If we still have a pending escalation, abort the cede,
+	 * and we must set PQ to 10 rather than 00 so that we don't
+	 * potentially end up with two entries for the escalation
+	 * interrupt in the XIVE interrupt queue.  In that case
+	 * we also don't want to set xive_esc_on to 1 here in
+	 * case we race with xive_esc_irq().
+	 */
+	lbz	r5, VCPU_XIVE_ESC_ON(r9)
+	cmpwi	r5, 0
+	beq	4f
+	li	r0, 0
+	stb	r0, VCPU_CEDED(r9)
+	li	r6, XIVE_ESB_SET_PQ_10
+	b	5f
+4:	li	r0, 1
+	stb	r0, VCPU_XIVE_ESC_ON(r9)
+	/* make sure store to xive_esc_on is seen before xive_esc_irq runs */
+	sync
+5:	/* Enable XIVE escalation */
+	mfmsr	r0
+	andi.	r0, r0, MSR_DR		/* in real mode? */
+	beq	1f
+	ldx	r0, r10, r6
 	b	2f
 1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
-	cmpdi	r10, 0
-	beq	3f
-	ldcix	r0, r10, r5
+	ldcix	r0, r10, r6
 2:	sync
-	li	r0, 1
-	stb	r0, VCPU_XIVE_ESC_ON(r9)
 #endif /* CONFIG_KVM_XICS */
 3:	b	guest_exit_cont
 
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index aae34f2..031f07f0 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1037,20 +1037,22 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
 	/* Mask the VP IPI */
 	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
 
-	/* Disable the VP */
-	xive_native_disable_vp(xc->vp_id);
-
-	/* Free the queues & associated interrupts */
+	/* Free escalations */
 	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
-		struct xive_q *q = &xc->queues[i];
-
-		/* Free the escalation irq */
 		if (xc->esc_virq[i]) {
 			free_irq(xc->esc_virq[i], vcpu);
 			irq_dispose_mapping(xc->esc_virq[i]);
 			kfree(xc->esc_virq_names[i]);
 		}
-		/* Free the queue */
+	}
+
+	/* Disable the VP */
+	xive_native_disable_vp(xc->vp_id);
+
+	/* Free the queues */
+	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+		struct xive_q *q = &xc->queues[i];
+
 		xive_native_disable_queue(xc->vp_id, q, i);
 		if (q->qpage) {
 			free_pages((unsigned long)q->qpage,
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index aaa28fd..0c13561 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -203,7 +203,7 @@ static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
 
 static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 {
-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
 		/* Need the extra ptesync to ensure we don't reorder tlbie*/
 		asm volatile("ptesync": : :"memory");
 		___tlbie(vpn, psize, apsize, ssize);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 29fd894..b1007e9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -37,6 +37,7 @@
 #include <linux/context_tracking.h>
 #include <linux/libfdt.h>
 #include <linux/pkeys.h>
+#include <linux/cpu.h>
 
 #include <asm/debugfs.h>
 #include <asm/processor.h>
@@ -1891,10 +1892,16 @@ static int hpt_order_get(void *data, u64 *val)
 
 static int hpt_order_set(void *data, u64 val)
 {
+	int ret;
+
 	if (!mmu_hash_ops.resize_hpt)
 		return -ENODEV;
 
-	return mmu_hash_ops.resize_hpt(val);
+	cpus_read_lock();
+	ret = mmu_hash_ops.resize_hpt(val);
+	cpus_read_unlock();
+
+	return ret;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index fef3e1e..0cddae4 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -220,7 +220,7 @@ static inline void fixup_tlbie(void)
 	unsigned long pid = 0;
 	unsigned long va = ((1UL << 52) - 1);
 
-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
 		asm volatile("ptesync": : :"memory");
 		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
 	}
@@ -230,7 +230,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
 {
 	unsigned long va = ((1UL << 52) - 1);
 
-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
 		asm volatile("ptesync": : :"memory");
 		__tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
 	}
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 38fe408..edf9032 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -680,7 +680,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
 				       bin_attr->size);
 }
 
-static BIN_ATTR_RO(symbol_map, 0);
+static struct bin_attribute symbol_map_attr = {
+	.attr = {.name = "symbol_map", .mode = 0400},
+	.read = symbol_map_read
+};
 
 static void opal_export_symmap(void)
 {
@@ -697,10 +700,10 @@ static void opal_export_symmap(void)
 		return;
 
 	/* Setup attributes */
-	bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
-	bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
+	symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
+	symbol_map_attr.size = be64_to_cpu(syms[1]);
 
-	rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
+	rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
 	if (rc)
 		pr_warn("Error %d creating OPAL symbols file\n", rc);
 }
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 29e66d6e..15a5671 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -49,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
 	return addr;
 }
 
+static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
+		unsigned long size, unsigned int levels);
+
 static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
 {
 	__be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
@@ -58,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
 
 	while (level) {
 		int n = (idx & mask) >> (level * shift);
-		unsigned long tce;
+		unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n]));
 
-		if (tmp[n] == 0) {
+		if (!tce) {
 			__be64 *tmp2;
 
 			if (!alloc)
@@ -71,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
 			if (!tmp2)
 				return NULL;
 
-			tmp[n] = cpu_to_be64(__pa(tmp2) |
-					TCE_PCI_READ | TCE_PCI_WRITE);
+			tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE;
+			oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0,
+					cpu_to_be64(tce)));
+			if (oldtce) {
+				pnv_pci_ioda2_table_do_free_pages(tmp2,
+					ilog2(tbl->it_level_size) + 3, 1);
+				tce = oldtce;
+			}
 		}
-		tce = be64_to_cpu(tmp[n]);
 
 		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
 		idx &= ~mask;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 9e52b68..ea602f7 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -647,7 +647,10 @@ static int pseries_lpar_resize_hpt_commit(void *data)
 	return 0;
 }
 
-/* Must be called in user context */
+/*
+ * Must be called in process context. The caller must hold the
+ * cpus_lock.
+ */
 static int pseries_lpar_resize_hpt(unsigned long shift)
 {
 	struct hpt_resize_state state = {
@@ -699,7 +702,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
 
 	t1 = ktime_get();
 
-	rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);
+	rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
+				     &state, NULL);
 
 	t2 = ktime_get();
 
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fa2c08e..a03821b 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -171,9 +171,13 @@
 	move a1, s4 /* scause */
 	tail do_IRQ
 1:
-	/* Exceptions run with interrupts enabled */
+	/* Exceptions run with interrupts enabled or disabled
+	   depending on the state of sstatus.SR_SPIE */
+	andi t0, s1, SR_SPIE
+	beqz t0, 1f
 	csrs sstatus, SR_SIE
 
+1:
 	/* Handle syscalls */
 	li t0, EXC_SYSCALL
 	beq s4, t0, handle_syscall
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6e758bb..99ef537 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -183,20 +183,30 @@ unsigned long get_wchan(struct task_struct *p)
 
 	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
 		return 0;
+
+	if (!try_get_task_stack(p))
+		return 0;
+
 	low = task_stack_page(p);
 	high = (struct stack_frame *) task_pt_regs(p);
 	sf = (struct stack_frame *) p->thread.ksp;
-	if (sf <= low || sf > high)
-		return 0;
+	if (sf <= low || sf > high) {
+		return_address = 0;
+		goto out;
+	}
 	for (count = 0; count < 16; count++) {
 		sf = (struct stack_frame *) sf->back_chain;
-		if (sf <= low || sf > high)
-			return 0;
+		if (sf <= low || sf > high) {
+			return_address = 0;
+			goto out;
+		}
 		return_address = sf->gprs[8];
 		if (!in_sched_functions(return_address))
-			return return_address;
+			goto out;
 	}
-	return 0;
+out:
+	put_task_stack(p);
+	return return_address;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e8184a1..7b96888 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
 	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
-		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+		if (dev)
+			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
 	return rc;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e0551c9..fac1d4e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3890,7 +3890,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
 
-	if (mop->flags & ~supported_flags)
+	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
 		return -EINVAL;
 
 	if (mop->size > MEM_OP_MAX_SIZE)
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index c3ab808..e2b132a 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -30,6 +30,7 @@
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
@@ -203,6 +204,7 @@
 CONFIG_NET_CLS_ACT=y
 CONFIG_VSOCKETS=y
 CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_BPF_JIT=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_RFKILL=y
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index eb0f80c..3aa82de 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -21,7 +21,7 @@
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 #define MWAITX_ECX_TIMER_ENABLE		BIT(1)
 #define MWAITX_MAX_LOOPS		((u32)-1)
-#define MWAITX_DISABLE_CSTATES		0xf
+#define MWAITX_DISABLE_CSTATES		0xf0
 
 static inline void __monitor(const void *eax, unsigned long ecx,
 			     unsigned long edx)
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 7685444..1455179 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -158,7 +158,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
 {
 	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
 
-	cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+	if (cmsk)
+		cpumask_clear_cpu(dead_cpu, &cmsk->mask);
 	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
 	return 0;
 }
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 250cfa8..88dc38b 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	 * we might write invalid pmds, when the kernel is relocated
 	 * cleanup_highmap() fixes this up along with the mappings
 	 * beyond _end.
+	 *
+	 * Only the region occupied by the kernel image has so far
+	 * been checked against the table of usable memory regions
+	 * provided by the firmware, so invalidate pages outside that
+	 * region. A page table entry that maps to a reserved area of
+	 * memory would allow processor speculation into that area,
+	 * and on some hardware (particularly the UV platform) even
+	 * speculative access to some reserved areas is caught as an
+	 * error, causing the BIOS to halt the system.
 	 */
 
 	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
-	for (i = 0; i < PTRS_PER_PMD; i++) {
+
+	/* invalidate pages before the kernel image */
+	for (i = 0; i < pmd_index((unsigned long)_text); i++)
+		pmd[i] &= ~_PAGE_PRESENT;
+
+	/* fixup pages that are part of the kernel image */
+	for (; i <= pmd_index((unsigned long)_end); i++)
 		if (pmd[i] & _PAGE_PRESENT)
 			pmd[i] += load_delta;
-	}
+
+	/* invalidate pages after the kernel image */
+	for (; i < PTRS_PER_PMD; i++)
+		pmd[i] &= ~_PAGE_PRESENT;
 
 	/*
 	 * Fixup phys_base - remove the memory encryption mask to obtain
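
The three loops above partition the 512 entries of level2_kernel_pgt around the image: entries before _text and after _end lose _PAGE_PRESENT, and only the entries covering the image are relocated. A hedged illustration of the index arithmetic, with made-up link addresses and the x86_64 constants written out (the real pmd_index() comes from the page-table headers):

#include <stdio.h>

/* x86_64: each PMD entry maps 2MB, 512 entries per table. */
#define PMD_SHIFT    21
#define PTRS_PER_PMD 512

static unsigned int pmd_index(unsigned long addr)
{
	return (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

int main(void)
{
	/* Illustrative stand-ins for _text and _end. */
	unsigned long text = 0xffffffff81000000UL;
	unsigned long end  = 0xffffffff82563000UL;

	printf("clear [0, %u), relocate [%u, %u], clear (%u, 512)\n",
	       pmd_index(text), pmd_index(text), pmd_index(end), pmd_index(end));
	return 0;
}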
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e83f4f6..6f7b3ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8801,7 +8801,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
 		if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
 						(is_long_mode(vcpu) ? 8 : 4),
-						NULL))
+						&e))
 			kvm_inject_page_fault(vcpu, &e);
 	}
 
@@ -12574,7 +12574,7 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 		/* VM-entry exception error code */
 		if (has_error_code &&
-		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))
 			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
 		/* VM-entry interruption-info field: reserved bits */
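
The one-bit change above only affects which error-code bits are treated as reserved: the check now rejects a set bit anywhere in 31:16 instead of 31:15. A user-space restatement of the two mask values, using an illustrative macro equivalent to the kernel's GENMASK_ULL():

#include <stdio.h>

/* Equivalent to the kernel's GENMASK_ULL(h, l): bits h..l set. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	printf("GENMASK(31, 15) = 0x%llx\n", GENMASK_ULL(31, 15)); /* 0xffff8000 */
	printf("GENMASK(31, 16) = 0x%llx\n", GENMASK_ULL(31, 16)); /* 0xffff0000 */
	return 0;
}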
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05cb585..6ae8a01 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -791,34 +791,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
+static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	if (cr4 & CR4_RESERVED_BITS)
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+		return -EINVAL;
+
+	return 0;
+}
+
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
 				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
 
-	if (cr4 & CR4_RESERVED_BITS)
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
 
 	if (is_long_mode(vcpu)) {
@@ -8237,10 +8245,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-			(sregs->cr4 & X86_CR4_OSXSAVE))
-		return  -EINVAL;
-
 	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
 		/*
 		 * When EFER.LME and CR0.PG are set, the processor is in
@@ -8259,7 +8263,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 			return -EINVAL;
 	}
 
-	return 0;
+	return kvm_valid_cr4(vcpu, sregs->cr4);
 }
 
 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index f5b7f1b..614c2c6b 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -113,8 +113,8 @@ static void delay_mwaitx(unsigned long __loops)
 		__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
 		/*
-		 * AMD, like Intel, supports the EAX hint and EAX=0xf
-		 * means, do not enter any deep C-state and we use it
+		 * AMD, like Intel's MWAIT version, supports the EAX hint and
+		 * EAX=0xf0 means, do not enter any deep C-state and we use it
 		 * here in delay() to minimize wakeup latency.
 		 */
 		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 10fb42d..b81b517 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -23,6 +23,7 @@
 
 PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
 PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 1804b27..66bcdee 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -77,7 +77,9 @@ static efi_system_table_t __init *xen_efi_probe(void)
 	efi.get_variable             = xen_efi_get_variable;
 	efi.get_next_variable        = xen_efi_get_next_variable;
 	efi.set_variable             = xen_efi_set_variable;
+	efi.set_variable_nonblocking = xen_efi_set_variable;
 	efi.query_variable_info      = xen_efi_query_variable_info;
+	efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
 	efi.update_capsule           = xen_efi_update_capsule;
 	efi.query_capsule_caps       = xen_efi_query_capsule_caps;
 	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 04f19de..4092555 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
 // FIXME EXPORT_SYMBOL(screen_info);
 #endif
 
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-
 extern long common_exception_return;
 EXPORT_SYMBOL(common_exception_return);
 
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 0005dfd..43bcd4e 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -148,24 +148,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd)
 	return ret;
 }
 
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
 {
 	/*
 	 * Hit max in previous round, stop here
 	 */
 	if (rqd->scaled_max)
-		return;
+		return false;
 
 	rqd->scale_step--;
 
 	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+	return true;
 }
 
 /*
  * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and returns false if
+ * scaling down wasn't possible.
  */
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 {
 	/*
 	 * Stop scaling down when we've hit the limit. This also prevents
@@ -173,7 +176,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 	 * keep up.
 	 */
 	if (rqd->max_depth == 1)
-		return;
+		return false;
 
 	if (rqd->scale_step < 0 && hard_throttle)
 		rqd->scale_step = 0;
@@ -182,6 +185,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 
 	rqd->scaled_max = false;
 	rq_depth_calc_max_depth(rqd);
+	return true;
 }
 
 void rq_qos_exit(struct request_queue *q)
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 32b02ef..98caba3 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -80,22 +80,19 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
 
 static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
 {
-	struct rq_qos *cur, *prev = NULL;
-	for (cur = q->rq_qos; cur; cur = cur->next) {
-		if (cur == rqos) {
-			if (prev)
-				prev->next = rqos->next;
-			else
-				q->rq_qos = cur;
+	struct rq_qos **cur;
+
+	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+		if (*cur == rqos) {
+			*cur = rqos->next;
 			break;
 		}
-		prev = cur;
 	}
 }
 
 bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
 
 void rq_qos_cleanup(struct request_queue *, struct bio *);
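
The rewritten rq_qos_del() walks the list through a pointer to the link itself, so removing the head needs no special case and the separate prev bookkeeping of the old version (whose head-removal branch reassigned the head to the node being deleted) goes away. The same idiom as a stand-alone sketch:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Unlink and free the first node matching val. cur always points at the
 * link to patch, so head and interior nodes are handled identically.
 */
static void list_del_value(struct node **head, int val)
{
	struct node **cur;

	for (cur = head; *cur; cur = &(*cur)->next) {
		if ((*cur)->val == val) {
			struct node *victim = *cur;

			*cur = victim->next;
			free(victim);
			break;
		}
	}
}

int main(void)
{
	struct node *head = NULL, **tail = &head;
	int i;

	for (i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->val = i;
		n->next = NULL;
		*tail = n;
		tail = &n->next;
	}

	list_del_value(&head, 0);		/* head removal, no special case */
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);		/* prints: 1 2 3 */
	printf("\n");

	while (head)				/* free the rest the same way */
		list_del_value(&head, head->val);
	return 0;
}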
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0c62bf4..f1de8ba 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -307,7 +307,8 @@ static void calc_wb_limits(struct rq_wb *rwb)
 
 static void scale_up(struct rq_wb *rwb)
 {
-	rq_depth_scale_up(&rwb->rq_depth);
+	if (!rq_depth_scale_up(&rwb->rq_depth))
+		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_wake_all(rwb);
@@ -316,7 +317,8 @@ static void scale_up(struct rq_wb *rwb)
 
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 {
-	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_trace_step(rwb, "scale down");
diff --git a/build.config.aarch64 b/build.config.aarch64
new file mode 100644
index 0000000..523bbc0
--- /dev/null
+++ b/build.config.aarch64
@@ -0,0 +1,11 @@
+ARCH=arm64
+
+CLANG_TRIPLE=aarch64-linux-gnu-
+CROSS_COMPILE=aarch64-linux-androidkernel-
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
+
+FILES="
+arch/arm64/boot/Image.gz
+vmlinux
+System.map
+"
diff --git a/build.config.common b/build.config.common
new file mode 100644
index 0000000..707de4f
--- /dev/null
+++ b/build.config.common
@@ -0,0 +1,9 @@
+BRANCH=android-4.19-q
+KERNEL_DIR=common
+
+CC=clang
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r365631c/bin
+
+EXTRA_CMDS=''
+STOP_SHIP_TRACEPRINTK=1
+LD=ld.lld
diff --git a/build.config.cuttlefish.aarch64 b/build.config.cuttlefish.aarch64
index da780202..0cb6019 100644
--- a/build.config.cuttlefish.aarch64
+++ b/build.config.cuttlefish.aarch64
@@ -1,18 +1,5 @@
-ARCH=arm64
-BRANCH=android-4.19
-CC=clang
-CLANG_TRIPLE=aarch64-linux-gnu-
-CROSS_COMPILE=aarch64-linux-androidkernel-
+. ${ROOT_DIR}/common/build.config.common
+. ${ROOT_DIR}/common/build.config.aarch64
+
 DEFCONFIG=cuttlefish_defconfig
-EXTRA_CMDS=''
-KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin
-LD=ld.lld
-LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
-FILES="
-arch/arm64/boot/Image.gz
-vmlinux
-System.map
-"
-STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
index da47330..fed773c 100644
--- a/build.config.cuttlefish.x86_64
+++ b/build.config.cuttlefish.x86_64
@@ -1,18 +1,5 @@
-ARCH=x86_64
-BRANCH=android-4.19
-CC=clang
-CLANG_TRIPLE=x86_64-linux-gnu-
-CROSS_COMPILE=x86_64-linux-androidkernel-
+. ${ROOT_DIR}/common/build.config.common
+. ${ROOT_DIR}/common/build.config.x86_64
+
 DEFCONFIG=x86_64_cuttlefish_defconfig
-EXTRA_CMDS=''
-KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin
-LD=ld.lld
-LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
-FILES="
-arch/x86/boot/bzImage
-vmlinux
-System.map
-"
-STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.x86_64 b/build.config.x86_64
new file mode 100644
index 0000000..df73a47
--- /dev/null
+++ b/build.config.x86_64
@@ -0,0 +1,11 @@
+ARCH=x86_64
+
+CLANG_TRIPLE=x86_64-linux-gnu-
+CROSS_COMPILE=x86_64-linux-androidkernel-
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
+
+FILES="
+arch/x86/boot/bzImage
+vmlinux
+System.map
+"
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index fff74f1..61e562f 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
 	return max(start, end_page);
 }
 
-static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
 	u8 *addr;
 
@@ -103,19 +103,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 	addr = skcipher_get_spot(addr, bsize);
 	scatterwalk_copychunks(addr, &walk->out, bsize,
 			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+	return 0;
 }
 
 int skcipher_walk_done(struct skcipher_walk *walk, int err)
 {
-	unsigned int n; /* bytes processed */
-	bool more;
+	unsigned int n = walk->nbytes;
+	unsigned int nbytes = 0;
 
-	if (unlikely(err < 0))
+	if (!n)
 		goto finish;
 
-	n = walk->nbytes - err;
-	walk->total -= n;
-	more = (walk->total != 0);
+	if (likely(err >= 0)) {
+		n -= err;
+		nbytes = walk->total - n;
+	}
 
 	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
 				    SKCIPHER_WALK_SLOW |
@@ -131,7 +133,7 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-		if (err) {
+		if (err > 0) {
 			/*
 			 * Didn't process all bytes.  Either the algorithm is
 			 * broken, or this was the last step and it turned out
@@ -139,27 +141,29 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 			 * the algorithm requires it.
 			 */
 			err = -EINVAL;
-			goto finish;
-		}
-		skcipher_done_slow(walk, n);
-		goto already_advanced;
+			nbytes = 0;
+		} else
+			n = skcipher_done_slow(walk, n);
 	}
 
+	if (err > 0)
+		err = 0;
+
+	walk->total = nbytes;
+	walk->nbytes = 0;
+
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-already_advanced:
-	scatterwalk_done(&walk->in, 0, more);
-	scatterwalk_done(&walk->out, 1, more);
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
 
-	if (more) {
+	if (nbytes) {
 		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
 			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
 		return skcipher_walk_next(walk);
 	}
-	err = 0;
-finish:
-	walk->nbytes = 0;
 
+finish:
 	/* Short-circuit for the common/fast path. */
 	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
 		goto out;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a1aa598..41228e5 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -909,8 +909,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 			pcc_data[pcc_ss_id]->refcount--;
 			if (!pcc_data[pcc_ss_id]->refcount) {
 				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
-				pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
 				kfree(pcc_data[pcc_ss_id]);
+				pcc_data[pcc_ss_id] = NULL;
 			}
 		}
 	}
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index da031b1..9dbf86a0 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -510,6 +510,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
 }
 
 /**
+ * check_acpi_cpu_flag() - Determine if CPU node has a flag set
+ * @cpu: Kernel logical CPU number
+ * @rev: The minimum PPTT revision defining the flag
+ * @flag: The flag itself
+ *
+ * Check the node representing a CPU for a given flag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or
+ *	   the table revision isn't new enough.
+ *	   1, any passed flag set
+ *	   0, flag unset
+ */
+static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
+{
+	struct acpi_table_header *table;
+	acpi_status status;
+	u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+	struct acpi_pptt_processor *cpu_node = NULL;
+	int ret = -ENOENT;
+
+	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+	if (ACPI_FAILURE(status)) {
+		pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n");
+		return ret;
+	}
+
+	if (table->revision >= rev)
+		cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+
+	if (cpu_node)
+		ret = (cpu_node->flags & flag) != 0;
+
+	acpi_put_table(table);
+
+	return ret;
+}
+
+/**
  * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
  * @cpu: Kernel logical cpu number
  *
@@ -574,6 +612,20 @@ int cache_setup_acpi(unsigned int cpu)
 }
 
 /**
+ * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
+ * @cpu: Kernel logical CPU number
+ *
+ * Return: 1, a thread
+ *         0, not a thread
+ *         -ENOENT, if the PPTT doesn't exist, the CPU cannot be found or
+ *         the table revision isn't new enough.
+ */
+int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+	return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
+}
+
+/**
  * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
  * @cpu: Kernel logical cpu number
  * @level: The topological level for which we would like a unique ID
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5d110b1..fa1c5a4 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1633,7 +1633,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
 	 */
 	if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
 		return;
-	if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
+
+	/* Skip applying the quirk on Denverton and beyond */
+	if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
 		return;
 
 	/*
diff --git a/drivers/base/core.c b/drivers/base/core.c
index dc70527..9dfab55 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -8,6 +8,7 @@
  * Copyright (c) 2006 Novell, Inc.
  */
 
+#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/fwnode.h>
@@ -2948,6 +2949,8 @@ void device_shutdown(void)
 	wait_for_device_probe();
 	device_block_probing();
 
+	cpufreq_suspend();
+
 	spin_lock(&devices_kset->list_lock);
 	/*
 	 * Walk the devices list backward, shutting down each in turn.
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b4a1e88..3fa026c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -635,6 +635,9 @@ store_soft_offline_page(struct device *dev,
 	pfn >>= PAGE_SHIFT;
 	if (!pfn_valid(pfn))
 		return -ENXIO;
+	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+	if (!pfn_to_online_page(pfn))
+		return -EIO;
 	ret = soft_offline_page(pfn_to_page(pfn), 0);
 	return ret == 0 ? count : ret;
 }
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b1c7009..bc2fa4e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -106,6 +106,7 @@ struct nbd_device {
 	struct nbd_config *config;
 	struct mutex config_lock;
 	struct gendisk *disk;
+	struct workqueue_struct *recv_workq;
 
 	struct list_head list;
 	struct task_struct *task_recv;
@@ -132,9 +133,10 @@ static struct dentry *nbd_dbg_dir;
 
 #define NBD_MAGIC 0x68797548
 
+#define NBD_DEF_BLKSIZE 1024
+
 static unsigned int nbds_max = 16;
 static int max_part = 16;
-static struct workqueue_struct *recv_workqueue;
 static int part_shift;
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
@@ -1025,7 +1027,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 		/* We take the tx_mutex in an error path in the recv_work, so we
 		 * need to queue_work outside of the tx_mutex.
 		 */
-		queue_work(recv_workqueue, &args->work);
+		queue_work(nbd->recv_workq, &args->work);
 
 		atomic_inc(&config->live_connections);
 		wake_up(&config->conn_wait);
@@ -1126,6 +1128,10 @@ static void nbd_config_put(struct nbd_device *nbd)
 		kfree(nbd->config);
 		nbd->config = NULL;
 
+		if (nbd->recv_workq)
+			destroy_workqueue(nbd->recv_workq);
+		nbd->recv_workq = NULL;
+
 		nbd->tag_set.timeout = 0;
 		nbd->disk->queue->limits.discard_granularity = 0;
 		nbd->disk->queue->limits.discard_alignment = 0;
@@ -1154,6 +1160,14 @@ static int nbd_start_device(struct nbd_device *nbd)
 		return -EINVAL;
 	}
 
+	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
+					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
+					  WQ_UNBOUND, 0, nbd->index);
+	if (!nbd->recv_workq) {
+		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
+		return -ENOMEM;
+	}
+
 	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
 	nbd->task_recv = current;
 
@@ -1184,7 +1198,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 		INIT_WORK(&args->work, recv_work);
 		args->nbd = nbd;
 		args->index = i;
-		queue_work(recv_workqueue, &args->work);
+		queue_work(nbd->recv_workq, &args->work);
 	}
 	nbd_size_update(nbd);
 	return error;
@@ -1204,8 +1218,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
 	mutex_unlock(&nbd->config_lock);
 	ret = wait_event_interruptible(config->recv_wq,
 					 atomic_read(&config->recv_threads) == 0);
-	if (ret)
+	if (ret) {
 		sock_shutdown(nbd);
+		flush_workqueue(nbd->recv_workq);
+	}
 	mutex_lock(&nbd->config_lock);
 	nbd_bdev_reset(bdev);
 	/* user requested, ignore socket errors */
@@ -1227,6 +1243,14 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
 		nbd_config_put(nbd);
 }
 
+static bool nbd_is_valid_blksize(unsigned long blksize)
+{
+	if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
+	    blksize > PAGE_SIZE)
+		return false;
+	return true;
+}
+
 /* Must be called with config_lock held */
 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		       unsigned int cmd, unsigned long arg)
@@ -1242,8 +1266,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 	case NBD_SET_SOCK:
 		return nbd_add_socket(nbd, arg, false);
 	case NBD_SET_BLKSIZE:
-		if (!arg || !is_power_of_2(arg) || arg < 512 ||
-		    arg > PAGE_SIZE)
+		if (!arg)
+			arg = NBD_DEF_BLKSIZE;
+		if (!nbd_is_valid_blksize(arg))
 			return -EINVAL;
 		nbd_size_set(nbd, arg,
 			     div_s64(config->bytesize, arg));
@@ -1323,7 +1348,7 @@ static struct nbd_config *nbd_alloc_config(void)
 	atomic_set(&config->recv_threads, 0);
 	init_waitqueue_head(&config->recv_wq);
 	init_waitqueue_head(&config->conn_wait);
-	config->blksize = 1024;
+	config->blksize = NBD_DEF_BLKSIZE;
 	atomic_set(&config->live_connections, 0);
 	try_module_get(THIS_MODULE);
 	return config;
@@ -1759,6 +1784,12 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
 		u64 bsize =
 			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
+		if (!bsize)
+			bsize = NBD_DEF_BLKSIZE;
+		if (!nbd_is_valid_blksize(bsize)) {
+			ret = -EINVAL;
+			goto out;
+		}
 		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
 	}
 	if (info->attrs[NBD_ATTR_TIMEOUT]) {
@@ -1835,6 +1866,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 	nbd_disconnect(nbd);
 	nbd_clear_sock(nbd);
 	mutex_unlock(&nbd->config_lock);
+	/*
+	 * Make sure recv thread has finished, so it does not drop the last
+	 * config ref and try to destroy the workqueue from inside the work
+	 * queue.
+	 */
+	flush_workqueue(nbd->recv_workq);
 	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
 			       &nbd->config->runtime_flags))
 		nbd_config_put(nbd);
@@ -2215,20 +2252,12 @@ static int __init nbd_init(void)
 
 	if (nbds_max > 1UL << (MINORBITS - part_shift))
 		return -EINVAL;
-	recv_workqueue = alloc_workqueue("knbd-recv",
-					 WQ_MEM_RECLAIM | WQ_HIGHPRI |
-					 WQ_UNBOUND, 0);
-	if (!recv_workqueue)
-		return -ENOMEM;
 
-	if (register_blkdev(NBD_MAJOR, "nbd")) {
-		destroy_workqueue(recv_workqueue);
+	if (register_blkdev(NBD_MAJOR, "nbd"))
 		return -EIO;
-	}
 
 	if (genl_register_family(&nbd_genl_family)) {
 		unregister_blkdev(NBD_MAJOR, "nbd");
-		destroy_workqueue(recv_workqueue);
 		return -EINVAL;
 	}
 	nbd_dbg_init();
@@ -2270,7 +2299,6 @@ static void __exit nbd_cleanup(void)
 
 	idr_destroy(&nbd_index_idr);
 	genl_unregister_family(&nbd_genl_family);
-	destroy_workqueue(recv_workqueue);
 	unregister_blkdev(NBD_MAJOR, "nbd");
 }
 
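
nbd_is_valid_blksize() above accepts only power-of-two sizes between 512 bytes and PAGE_SIZE, and both the ioctl and netlink paths now substitute NBD_DEF_BLKSIZE when user space passes zero. A stand-alone sketch of the same validation, with is_power_of_2() restated and PAGE_SIZE assumed to be 4096:

#include <stdbool.h>
#include <stdio.h>

#define DEF_BLKSIZE    1024UL
#define FAKE_PAGE_SIZE 4096UL

/* Non-zero, a power of two, and within [512, PAGE_SIZE]. */
static bool blksize_is_valid(unsigned long blksize)
{
	if (blksize == 0 || (blksize & (blksize - 1)) != 0)
		return false;
	return blksize >= 512 && blksize <= FAKE_PAGE_SIZE;
}

int main(void)
{
	unsigned long sizes[] = { 0, 512, 1000, 1024, 4096, 8192 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* Zero falls back to the default, as in the driver. */
		unsigned long sz = sizes[i] ? sizes[i] : DEF_BLKSIZE;

		printf("%5lu -> %s\n", sizes[i],
		       blksize_is_valid(sz) ? "ok" : "rejected");
	}
	return 0;
}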
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index 0f53142..9bb7717 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -533,6 +533,19 @@ static int mhi_lpm_enable(struct mhi_controller *mhi_cntrl, void *priv)
 	return ret;
 }
 
+void mhi_qcom_store_hwinfo(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int i;
+
+	mhi_dev->serial_num = readl_relaxed(mhi_cntrl->bhi +
+			MHI_BHI_SERIAL_NUM_OFFS);
+
+	for (i = 0; i < ARRAY_SIZE(mhi_dev->oem_pk_hash); i++)
+		mhi_dev->oem_pk_hash[i] = readl_relaxed(mhi_cntrl->bhi +
+			MHI_BHI_OEMPKHASH(i));
+}
+
 static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
 {
 	enum mhi_dev_state dev_state = mhi_get_mhi_state(mhi_cntrl);
@@ -569,6 +582,10 @@ static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
 
 	ret = mhi_async_power_up(mhi_cntrl);
 
+	/* Update modem serial info */
+	if (!ret)
+		mhi_qcom_store_hwinfo(mhi_cntrl);
+
 	/* power up create the dentry */
 	if (mhi_cntrl->dentry) {
 		debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
@@ -678,9 +695,45 @@ static ssize_t power_up_store(struct device *dev,
 }
 static DEVICE_ATTR_WO(power_up);
 
+static ssize_t serial_info_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct mhi_device *mhi_device = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int n;
+
+	n = scnprintf(buf, PAGE_SIZE, "Serial Number:%u\n",
+		      mhi_dev->serial_num);
+
+	return n;
+}
+static DEVICE_ATTR_RO(serial_info);
+
+static ssize_t oempkhash_info_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct mhi_device *mhi_device = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int i, n = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mhi_dev->oem_pk_hash); i++)
+		n += scnprintf(buf + n, PAGE_SIZE - n, "OEMPKHASH[%d]:%u\n",
+		      i, mhi_dev->oem_pk_hash[i]);
+
+	return n;
+}
+static DEVICE_ATTR_RO(oempkhash_info);
+
+
 static struct attribute *mhi_qcom_attrs[] = {
 	&dev_attr_timeout_ms.attr,
 	&dev_attr_power_up.attr,
+	&dev_attr_serial_info.attr,
+	&dev_attr_oempkhash_info.attr,
 	NULL
 };
 
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
index 53145a1..3604863 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.h
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -14,6 +14,10 @@
 #define MHI_PCIE_VENDOR_ID (0x17cb)
 #define MHI_PCIE_DEBUG_ID (0xffff)
 
+#define MHI_BHI_SERIAL_NUM_OFFS (0x40)
+#define MHI_BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
+#define MHI_BHI_OEMPKHASH_SEG (16)
+
 /* runtime suspend timer */
 #define MHI_RPM_SUSPEND_TMR_MS (250)
 #define MHI_PCI_BAR_NUM (0)
@@ -49,6 +53,10 @@ struct mhi_dev {
 	dma_addr_t iova_stop;
 	enum mhi_suspend_mode suspend_mode;
 
+	/* hardware info */
+	u32 serial_num;
+	u32 oem_pk_hash[MHI_BHI_OEMPKHASH_SEG];
+
 	unsigned int lpm_disable_depth;
 	/* lock to toggle low power modes */
 	spinlock_t lpm_lock;
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 6f4c390..7f68495 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -39,6 +39,7 @@ const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = {
 	[MHI_ST_TRANSITION_READY] = "READY",
 	[MHI_ST_TRANSITION_SBL] = "SBL",
 	[MHI_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+	[MHI_ST_TRANSITION_DISABLE] = "DISABLE",
 };
 
 const char * const mhi_state_str[MHI_STATE_MAX] = {
@@ -1341,7 +1342,6 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 	spin_lock_init(&mhi_cntrl->wlock);
 	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
 	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
-	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
 	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
 	init_waitqueue_head(&mhi_cntrl->state_event);
 
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index 3b82e338..e4e80f9 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -429,6 +429,7 @@ enum MHI_ST_TRANSITION {
 	MHI_ST_TRANSITION_READY,
 	MHI_ST_TRANSITION_SBL,
 	MHI_ST_TRANSITION_MISSION_MODE,
+	MHI_ST_TRANSITION_DISABLE,
 	MHI_ST_TRANSITION_MAX,
 };
 
@@ -586,6 +587,7 @@ struct mhi_pm_transitions {
 struct state_transition {
 	struct list_head node;
 	enum MHI_ST_TRANSITION state;
+	enum MHI_PM_STATE pm_state;
 };
 
 struct mhi_ctxt {
@@ -744,7 +746,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
 			       enum MHI_ST_TRANSITION state);
 void mhi_pm_st_worker(struct work_struct *work);
 void mhi_fw_load_worker(struct work_struct *work);
-void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_process_sys_err(struct mhi_controller *mhi_cntrl);
 void mhi_low_priority_worker(struct work_struct *work);
 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
 void mhi_ctrl_ev_task(unsigned long data);
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 0b339fd..4eed696 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -1194,8 +1194,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 							MHI_PM_SYS_ERR_DETECT);
 				write_unlock_irq(&mhi_cntrl->pm_lock);
 				if (new_state == MHI_PM_SYS_ERR_DETECT)
-					schedule_work(
-						&mhi_cntrl->syserr_worker);
+					mhi_process_sys_err(mhi_cntrl);
 				break;
 			}
 			default:
@@ -1539,7 +1538,7 @@ void mhi_ctrl_ev_task(unsigned long data)
 		}
 		write_unlock_irq(&mhi_cntrl->pm_lock);
 		if (pm_state == MHI_PM_SYS_ERR_DETECT)
-			schedule_work(&mhi_cntrl->syserr_worker);
+			mhi_process_sys_err(mhi_cntrl);
 	}
 }
 
@@ -1624,7 +1623,7 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
 			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
 					     MHI_CB_FATAL_ERROR);
 		else
-			schedule_work(&mhi_cntrl->syserr_worker);
+			mhi_process_sys_err(mhi_cntrl);
 	}
 
 exit_intvec:
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index b916acb..3c79c0b 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -155,9 +155,6 @@ enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
 	MHI_VERB("Transition to pm state from:%s to:%s\n",
 		 to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state));
 
-	if (MHI_REG_ACCESS_VALID(cur_state) || MHI_REG_ACCESS_VALID(state))
-		mhi_timesync_log(mhi_cntrl);
-
 	mhi_cntrl->pm_state = state;
 	return mhi_cntrl->pm_state;
 }
@@ -202,10 +199,12 @@ void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
 		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
 	} else {
 		/* if resources requested already, then increment and exit */
-		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
-			return;
-
 		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) {
+			spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+			return;
+		}
+
 		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
 		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
 		    !mhi_cntrl->wake_set) {
@@ -225,15 +224,20 @@ void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override)
 		   atomic_read(&mhi_cntrl->dev_wake) == 0), "dev_wake == 0");
 
 	/* resources not dropping to 0, decrement and exit */
-	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
-		return;
-
 	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) {
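+	/*
+	 * Record a non-override release that did not drop the count to zero
+	 * so a later override release can still clear the wake doorbell.
+	 */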
+		if (!override)
+			mhi_cntrl->ignore_override = true;
+		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+		return;
+	}
+
 	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
-	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
-	    mhi_cntrl->wake_set) {
+	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && (!override ||
+	    mhi_cntrl->ignore_override) && mhi_cntrl->wake_set) {
 		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
 		mhi_cntrl->wake_set = false;
+		mhi_cntrl->ignore_override = false;
 	}
 	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
 }
@@ -506,6 +510,9 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 	/* setup support for time sync */
 	mhi_init_timesync(mhi_cntrl);
 
+	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		mhi_timesync_log(mhi_cntrl);
+
 	MHI_LOG("Adding new devices\n");
 
 	/* add supported devices */
@@ -542,20 +549,9 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 		to_mhi_pm_state_str(transition_state));
 
 	/* We must notify MHI control driver so it can clean up first */
-	if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
-		/*
-		 * if controller support rddm, we do not process
-		 * sys error state, instead we will jump directly
-		 * to rddm state
-		 */
-		if (mhi_cntrl->rddm_image) {
-			MHI_LOG(
-				"Controller Support RDDM, skipping SYS_ERR_PROCESS\n");
-			return;
-		}
+	if (transition_state == MHI_PM_SYS_ERR_PROCESS)
 		mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
 				     MHI_CB_SYS_ERROR);
-	}
 
 	mutex_lock(&mhi_cntrl->pm_mutex);
 	write_lock_irq(&mhi_cntrl->pm_lock);
@@ -628,7 +624,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 
 	MHI_LOG("Waiting for all pending threads to complete\n");
 	wake_up_all(&mhi_cntrl->state_event);
-	flush_work(&mhi_cntrl->st_worker);
 	flush_work(&mhi_cntrl->fw_worker);
 	flush_work(&mhi_cntrl->low_priority_worker);
 
@@ -718,7 +713,28 @@ int mhi_debugfs_trigger_reset(void *data, u64 val)
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
 	if (cur_state == MHI_PM_SYS_ERR_DETECT)
-		schedule_work(&mhi_cntrl->syserr_worker);
+		mhi_process_sys_err(mhi_cntrl);
+
+	return 0;
+}
+
+/* queue disable transition work item */
+int mhi_queue_disable_transition(struct mhi_controller *mhi_cntrl,
+				 enum MHI_PM_STATE pm_state)
+{
+	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+	unsigned long flags;
+
+	if (!item)
+		return -ENOMEM;
+
+	item->pm_state = pm_state;
+	item->state = MHI_ST_TRANSITION_DISABLE;
+	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+	list_add_tail(&item->node, &mhi_cntrl->transition_list);
+	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+	schedule_work(&mhi_cntrl->st_worker);
 
 	return 0;
 }
@@ -781,17 +797,18 @@ void mhi_low_priority_worker(struct work_struct *work)
 	}
 }
 
-void mhi_pm_sys_err_worker(struct work_struct *work)
+void mhi_process_sys_err(struct mhi_controller *mhi_cntrl)
 {
-	struct mhi_controller *mhi_cntrl = container_of(work,
-							struct mhi_controller,
-							syserr_worker);
+	/*
+	 * If the controller supports RDDM, do not process the SYS_ERROR
+	 * state; jump directly to the RDDM state instead.
+	 */
+	if (mhi_cntrl->rddm_image) {
+		MHI_LOG("Controller supports RDDM, skipping SYS_ERR_PROCESS\n");
+		return;
+	}
 
-	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
-		to_mhi_pm_state_str(mhi_cntrl->pm_state),
-		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
-
-	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+	mhi_queue_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
 }
 
 void mhi_pm_st_worker(struct work_struct *work)
@@ -832,6 +849,9 @@ void mhi_pm_st_worker(struct work_struct *work)
 		case MHI_ST_TRANSITION_READY:
 			mhi_ready_state_transition(mhi_cntrl);
 			break;
+		case MHI_ST_TRANSITION_DISABLE:
+			mhi_pm_disable_transition(mhi_cntrl, itr->pm_state);
+			break;
 		default:
 			break;
 		}
@@ -1008,7 +1028,11 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 
 		transition_state = MHI_PM_SHUTDOWN_NO_ACCESS;
 	}
-	mhi_pm_disable_transition(mhi_cntrl, transition_state);
+
+	mhi_queue_disable_transition(mhi_cntrl, transition_state);
+
+	MHI_LOG("Wait for shutdown to complete\n");
+	flush_work(&mhi_cntrl->st_worker);
 
 	mhi_deinit_debugfs(mhi_cntrl);
 
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index 4088b24..2462147 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -787,8 +787,10 @@ static int mhi_sat_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
 
 	/* find controller packet was sent for */
 	sat_cntrl = find_sat_cntrl_by_id(subsys, hdr->dev_id);
-
-	MHI_SAT_ASSERT(!sat_cntrl, "Packet for unknown device!\n");
+	if (!sat_cntrl) {
+		MHI_SAT_ERR("Message for unknown device!\n");
+		return 0;
+	}
 
 	/* handle events directly regardless of controller active state */
 	if (hdr->msg_id == SAT_MSG_ID_EVT) {
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index 682c7a6..772456e 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -395,14 +395,16 @@ static void diag_usb_notifier(void *priv, unsigned int event,
 
 	switch (event) {
 	case USB_DIAG_CONNECT:
-		pr_info("diag: USB channel %s: Received Connect event\n",
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: USB channel %s: Received Connect event\n",
 			usb_info->name);
 		diag_usb_event_add(usb_info, USB_DIAG_CONNECT);
 		queue_work(usb_info->usb_wq,
 			   &usb_info->event_work);
 		break;
 	case USB_DIAG_DISCONNECT:
-		pr_info("diag: USB channel %s: Received Disconnect event\n",
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: USB channel %s: Received Disconnect event\n",
 			usb_info->name);
 		diag_usb_event_add(usb_info, USB_DIAG_DISCONNECT);
 		queue_work(usb_info->usb_wq,
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 7c7af1e..31769e7 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -3093,9 +3093,14 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
 	send.last = (void *)(buf + len - 1);
 	send.terminate = 1;
 
+wait_for_buffer:
 	wait_event_interruptible(driver->hdlc_wait_q,
 			(data->flushed == 0));
 	spin_lock_irqsave(&driver->diagmem_lock, flags);
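+	/*
+	 * Re-check under diagmem_lock: the buffer may have been flushed again
+	 * between the wakeup and taking the lock, so wait once more if so.
+	 */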
+	if (data->flushed) {
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+		goto wait_for_buffer;
+	}
 	if (!data->buf) {
 		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
 					APF_DIAG_PADDING,
@@ -3118,19 +3123,7 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
 			ret = -EIO;
 			goto fail_free_buf;
 		}
-		wait_event_interruptible(driver->hdlc_wait_q,
-			(data->flushed == 0));
-		spin_lock_irqsave(&driver->diagmem_lock, flags);
-		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
-					APF_DIAG_PADDING,
-					  POOL_TYPE_HDLC);
-		if (!data->buf) {
-			ret = PKT_DROP;
-			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
-			goto fail_ret;
-		}
-		data->allocated = 1;
-		data->flushed = 0;
+		goto wait_for_buffer;
 	}
 
 	enc.dest = data->buf + data->len;
@@ -3152,9 +3145,14 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
 			ret = -EIO;
 			goto fail_free_buf;
 		}
+wait_for_agg_buff:
 		wait_event_interruptible(driver->hdlc_wait_q,
 			(data->flushed == 0));
 		spin_lock_irqsave(&driver->diagmem_lock, flags);
+		if (data->flushed) {
+			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+			goto wait_for_agg_buff;
+		}
 		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
 					APF_DIAG_PADDING,
 					 POOL_TYPE_HDLC);
@@ -3224,9 +3222,14 @@ static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
 		       __func__, buf, len);
 		return -EIO;
 	}
+wait_for_buffer:
 	wait_event_interruptible(driver->hdlc_wait_q,
 			(data->flushed == 0));
 	spin_lock_irqsave(&driver->diagmem_lock, flags);
+	if (data->flushed) {
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+		goto wait_for_buffer;
+	}
 	if (!data->buf) {
 		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
 					APF_DIAG_PADDING,
@@ -3248,20 +3251,7 @@ static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
 			ret = -EIO;
 			goto fail_free_buf;
 		}
-		wait_event_interruptible(driver->hdlc_wait_q,
-			(data->flushed == 0));
-
-		spin_lock_irqsave(&driver->diagmem_lock, flags);
-		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
-					APF_DIAG_PADDING,
-					  POOL_TYPE_HDLC);
-		if (!data->buf) {
-			ret = PKT_DROP;
-			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
-			goto fail_ret;
-		}
-		data->allocated = 1;
-		data->flushed = 0;
+		goto wait_for_buffer;
 	}
 
 	header.start = CONTROL_CHAR;
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 374d185..4ef4776 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -86,8 +86,6 @@ static int diagfwd_bridge_mux_connect(int id, int mode)
 {
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
-	if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->open)
-		bridge_info[id].dev_ops->open(id, bridge_info[id].ctxt);
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/dispcc-bengal.c b/drivers/clk/qcom/dispcc-bengal.c
index dfc5318..8c1d82c 100644
--- a/drivers/clk/qcom/dispcc-bengal.c
+++ b/drivers/clk/qcom/dispcc-bengal.c
@@ -78,7 +78,7 @@ static const struct parent_map disp_cc_parent_map_3[] = {
 
 static const char * const disp_cc_parent_names_3[] = {
 	"bi_tcxo",
-	"gpll0_out_main",
+	"gcc_disp_gpll0_div_clk_src",
 	"core_bi_pll_test_se",
 };
 
diff --git a/drivers/clk/qcom/gcc-bengal.c b/drivers/clk/qcom/gcc-bengal.c
index 7d5942c..ba18207 100644
--- a/drivers/clk/qcom/gcc-bengal.c
+++ b/drivers/clk/qcom/gcc-bengal.c
@@ -1856,7 +1856,7 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
 };
 
 static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
-	F(133000000, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
+	F(133333333, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
 	F(240000000, P_GPLL11_OUT_MAIN, 2.5, 0, 0),
 	F(300000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
 	F(384000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
@@ -2590,6 +2590,19 @@ static struct clk_branch gcc_disp_ahb_clk = {
 	},
 };
 
+static struct clk_regmap_div gcc_disp_gpll0_clk_src = {
+	.reg = 0x17058,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "gcc_disp_gpll0_clk_src",
+		.parent_names =
+			(const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
 static struct clk_branch gcc_disp_gpll0_div_clk_src = {
 	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
@@ -2598,7 +2611,7 @@ static struct clk_branch gcc_disp_gpll0_div_clk_src = {
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_disp_gpll0_div_clk_src",
 			.parent_names = (const char *[]){
-				"gpll0",
+				"gcc_disp_gpll0_clk_src",
 			},
 			.num_parents = 1,
 			.flags = CLK_SET_RATE_PARENT,
@@ -3701,6 +3714,7 @@ static struct clk_regmap *gcc_bengal_clocks[] = {
 	[GCC_CPUSS_THROTTLE_CORE_CLK] = &gcc_cpuss_throttle_core_clk.clkr,
 	[GCC_CPUSS_THROTTLE_XO_CLK] = &gcc_cpuss_throttle_xo_clk.clkr,
 	[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+	[GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
 	[GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
 	[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
 	[GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
@@ -3818,6 +3832,8 @@ static const struct qcom_reset_map gcc_bengal_resets[] = {
 	[GCC_UFS_PHY_BCR] = { 0x45000 },
 	[GCC_USB30_PRIM_BCR] = { 0x1a000 },
 	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+	[GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x1b008 },
+	[GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x1b000 },
 	[GCC_VCODEC0_BCR] = { 0x58094 },
 	[GCC_VENUS_BCR] = { 0x58078 },
 	[GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
diff --git a/drivers/clk/qcom/gpucc-bengal.c b/drivers/clk/qcom/gpucc-bengal.c
index 1bba610..939568d0 100644
--- a/drivers/clk/qcom/gpucc-bengal.c
+++ b/drivers/clk/qcom/gpucc-bengal.c
@@ -53,8 +53,8 @@ static const char * const gpu_cc_parent_names_0[] = {
 	"bi_tcxo",
 	"gpu_cc_pll0_out_main",
 	"gpu_cc_pll1_out_main",
-	"gpll0",
-	"gpll0_out_main_div",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
 	"core_bi_pll_test_se",
 };
 
@@ -199,7 +199,7 @@ static struct clk_alpha_pll_postdiv gpu_cc_pll1_out_aux = {
 };
 
 static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
-	F(200000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
 	{ }
 };
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a8f8766..136c9c2 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2620,14 +2620,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
-	.shutdown = cpufreq_suspend,
-};
-
 struct kobject *cpufreq_global_kobject;
 EXPORT_SYMBOL(cpufreq_global_kobject);
 
@@ -2639,8 +2631,6 @@ static int __init cpufreq_core_init(void)
 	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
 
-	register_syscore_ops(&cpufreq_syscore_ops);
-
 	return 0;
 }
 module_param(off, int, 0444);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 75c2c15..117960f 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -23,8 +23,7 @@
 #define CORE_COUNT_VAL(val)		(((val) & (GENMASK(18, 16))) >> 16)
 #define LUT_ROW_SIZE			32
 #define CLK_HW_DIV			2
-#define EQ_IRQ_STATUS			BIT(0)
-#define LT_IRQ_STATUS			BIT(1)
+#define GT_IRQ_STATUS			BIT(2)
 #define MAX_FN_SIZE			12
 #define LIMITS_POLLING_DELAY_MS		10
 
@@ -37,7 +36,7 @@ enum {
 	REG_VOLT_LUT_TABLE,
 	REG_PERF_STATE,
 	REG_CYCLE_CNTR,
-	REG_LLM_DCVS_VC_VOTE,
+	REG_DOMAIN_STATE,
 	REG_INTR_EN,
 	REG_INTR_CLR,
 	REG_INTR_STATUS,
@@ -86,7 +85,7 @@ static const u16 cpufreq_qcom_epss_std_offsets[REG_ARRAY_SIZE] = {
 	[REG_VOLT_LUT_TABLE]	= 0x200,
 	[REG_PERF_STATE]	= 0x320,
 	[REG_CYCLE_CNTR]	= 0x3c4,
-	[REG_LLM_DCVS_VC_VOTE]	= 0x024,
+	[REG_DOMAIN_STATE]	= 0x020,
 	[REG_INTR_EN]		= 0x304,
 	[REG_INTR_CLR]		= 0x308,
 	[REG_INTR_STATUS]	= 0x30C,
@@ -95,6 +94,8 @@ static const u16 cpufreq_qcom_epss_std_offsets[REG_ARRAY_SIZE] = {
 static struct cpufreq_counter qcom_cpufreq_counter[NR_CPUS];
 static struct cpufreq_qcom *qcom_freq_domain_map[NR_CPUS];
 
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu);
+
 static ssize_t dcvsh_freq_limit_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -105,48 +106,40 @@ static ssize_t dcvsh_freq_limit_show(struct device *dev,
 
 static unsigned long limits_mitigation_notify(struct cpufreq_qcom *c)
 {
-	int i;
-	u32 max_vc;
+	unsigned long freq;
 
-	max_vc = readl_relaxed(c->reg_bases[REG_LLM_DCVS_VC_VOTE]) &
-						GENMASK(13, 8);
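+	/*
+	 * The low byte of the domain-state register reports the current
+	 * limit as a multiple of the XO rate; convert it to kHz.
+	 */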
+	freq = readl_relaxed(c->reg_bases[REG_DOMAIN_STATE]) &
+				GENMASK(7, 0);
+	freq = DIV_ROUND_CLOSEST_ULL(freq * c->xo_rate, 1000);
 
-	for (i = 0; i < lut_max_entries; i++) {
-		if (c->table[i].driver_data != max_vc)
-			continue;
-		else {
-			sched_update_cpu_freq_min_max(&c->related_cpus, 0,
-					c->table[i].frequency);
-			trace_dcvsh_freq(cpumask_first(&c->related_cpus),
-						c->table[i].frequency);
-			c->dcvsh_freq_limit = c->table[i].frequency;
-			return c->table[i].frequency;
-		}
-	}
+	sched_update_cpu_freq_min_max(&c->related_cpus, 0, freq);
+	trace_dcvsh_freq(cpumask_first(&c->related_cpus), freq);
+	c->dcvsh_freq_limit = freq;
 
-	return 0;
+	return freq;
 }
 
 static void limits_dcvsh_poll(struct work_struct *work)
 {
 	struct cpufreq_qcom *c = container_of(work, struct cpufreq_qcom,
 						freq_poll_work.work);
-	struct cpufreq_policy *policy;
-	unsigned long freq_limit;
+	unsigned long freq_limit, dcvsh_freq;
 	u32 regval, cpu;
 
 	mutex_lock(&c->dcvsh_lock);
 
 	cpu = cpumask_first(&c->related_cpus);
-	policy = cpufreq_cpu_get_raw(cpu);
 
 	freq_limit = limits_mitigation_notify(c);
-	if (freq_limit != policy->cpuinfo.max_freq || !freq_limit) {
+
+	dcvsh_freq = qcom_cpufreq_hw_get(cpu);
+
+	if (freq_limit != dcvsh_freq) {
 		mod_delayed_work(system_highpri_wq, &c->freq_poll_work,
 				msecs_to_jiffies(LIMITS_POLLING_DELAY_MS));
 	} else {
 		regval = readl_relaxed(c->reg_bases[REG_INTR_CLR]);
-		regval &= ~LT_IRQ_STATUS;
+		regval |= GT_IRQ_STATUS;
 		writel_relaxed(regval, c->reg_bases[REG_INTR_CLR]);
 
 		c->is_irq_enabled = true;
@@ -162,7 +155,7 @@ static irqreturn_t dcvsh_handle_isr(int irq, void *data)
 	u32 regval;
 
 	regval = readl_relaxed(c->reg_bases[REG_INTR_STATUS]);
-	if (!(regval & LT_IRQ_STATUS))
+	if (!(regval & GT_IRQ_STATUS))
 		return IRQ_HANDLED;
 
 	mutex_lock(&c->dcvsh_lock);
@@ -301,7 +294,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 					"dcvsh-irq-%d", policy->cpu);
 		ret = devm_request_threaded_irq(cpu_dev, c->dcvsh_irq, NULL,
 			dcvsh_handle_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
-			IRQF_NO_SUSPEND | IRQF_SHARED, c->dcvsh_irq_name, c);
+			IRQF_NO_SUSPEND, c->dcvsh_irq_name, c);
 		if (ret) {
 			dev_err(cpu_dev, "Failed to register irq %d\n", ret);
 			return ret;
@@ -309,7 +302,6 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 
 		c->is_irq_requested = true;
 		c->is_irq_enabled = true;
-		writel_relaxed(LT_IRQ_STATUS, c->reg_bases[REG_INTR_EN]);
 		c->freq_limit_attr.attr.name = "dcvsh_freq_limit";
 		c->freq_limit_attr.show = dcvsh_freq_limit_show;
 		c->freq_limit_attr.attr.mode = 0444;
@@ -401,7 +393,6 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
 		else
 			c->table[i].frequency = c->cpu_hw_rate / 1000;
 
-		c->table[i].driver_data = vc;
 		cur_freq = c->table[i].frequency;
 
 		dev_dbg(dev, "index=%d freq=%d, core_count %d\n",
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index a408edd..edacf9b 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -509,6 +509,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 			       const bool is_qi, int era)
 {
 	u32 geniv, moveiv;
+	u32 *wait_cmd;
 
 	/* Note: Context registers are saved. */
 	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
@@ -604,6 +605,14 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 
 	/* Will read cryptlen */
 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * Wait for IV transfer (ofifo -> class2) to finish before starting
+	 * ciphertext transfer (ofifo -> external memory).
+	 */
+	wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
+	set_jump_tgt_here(desc, wait_cmd);
+
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
 			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index a917af5..05516b0 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -12,7 +12,7 @@
 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
 #define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_ENC_LEN		(DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_DEC_LEN		(DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_GIVENC_LEN		(DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index 6183f91..ea901bc 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = {
 	.owner = THIS_MODULE,
 	.open  = zip_stats_open,
 	.read  = seq_read,
+	.release = single_release,
 };
 
 static int zip_clear_open(struct inode *inode, struct file *file)
@@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = {
 	.owner = THIS_MODULE,
 	.open  = zip_clear_open,
 	.read  = seq_read,
+	.release = single_release,
 };
 
 static int zip_regs_open(struct inode *inode, struct file *file)
@@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = {
 	.owner = THIS_MODULE,
 	.open  = zip_regs_open,
 	.read  = seq_read,
+	.release = single_release,
 };
 
 /* Root directory for thunderx_zip debugfs entry */
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 0669033..aa6b45b 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -227,7 +227,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 			/* In case of payload authentication failure, MUST NOT
 			 * revealed the decrypted message --> zero its memory.
 			 */
-			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
+			cc_zero_sgl(areq->dst, areq->cryptlen);
 			err = -EBADMSG;
 		}
 	} else { /*ENCRYPT*/
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 09f708f..bac278d 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
 	u32 reg;
 
 	reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
-	return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+	/* Did the TEE report status? */
+	if (reg & CC_FIPS_SYNC_TEE_STATUS)
+		/* Yes. Is it OK? */
+		return (reg & CC_FIPS_SYNC_MODULE_OK);
+
+	/* No. It's either not in use or will be reported later */
+	return true;
 }
 
 /*
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 5c4c0a2..d78f8d5 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -95,7 +95,7 @@ struct service_hndl {
 
 static inline int get_current_node(void)
 {
-	return topology_physical_package_id(smp_processor_id());
+	return topology_physical_package_id(raw_smp_processor_id());
 }
 
 int adf_service_register(struct service_hndl *service);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 43fd8aa..bc43003 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -238,6 +238,14 @@
 	  driver votes on this interface to request a particular
 	  memory latency QoS level.
 
+config DEVFREQ_GOV_STATICMAP
+	tristate "Device driver for static voting of DDR freq based on a clock rate change"
+	depends on ARCH_QCOM
+	help
+	  Clock-notifier-based governor for device-to-DDR bandwidth voting.
+	  This governor votes for DDR bandwidth based on changes to the
+	  device's clock rate.
+
 source "drivers/devfreq/event/Kconfig"
 
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index e106e9c..c511cf0 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -24,6 +24,7 @@
 obj-$(CONFIG_QCOM_DEVFREQ_DEVBW)		+= devfreq_devbw.o
 obj-$(CONFIG_DEVFREQ_SIMPLE_DEV)	+= devfreq_simple_dev.o
 obj-$(CONFIG_ARM_QCOM_DEVFREQ_QOSLAT)	+= devfreq_qcom_qoslat.o
+obj-$(CONFIG_DEVFREQ_GOV_STATICMAP)	+= governor_staticmap.o
 
 # DEVFREQ Event Drivers
 obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/drivers/devfreq/devfreq_qcom_qoslat.c b/drivers/devfreq/devfreq_qcom_qoslat.c
index 3c73985..a751541 100644
--- a/drivers/devfreq/devfreq_qcom_qoslat.c
+++ b/drivers/devfreq/devfreq_qcom_qoslat.c
@@ -27,6 +27,9 @@ struct qoslat_data {
 	unsigned int			qos_level;
 };
 
+#define QOS_LEVEL_OFF	1
+#define QOS_LEVEL_ON	2
+
 #define MAX_MSG_LEN	96
 static int update_qos_level(struct device *dev, struct qoslat_data *d)
 {
@@ -35,7 +38,7 @@ static int update_qos_level(struct device *dev, struct qoslat_data *d)
 	char *qos_msg = "off";
 	int ret;
 
-	if (d->qos_level)
+	if (d->qos_level == QOS_LEVEL_ON)
 		qos_msg = "on";
 
 	snprintf(mbox_msg, MAX_MSG_LEN, "{class: ddr, perfmode: %s}", qos_msg);
@@ -106,7 +109,7 @@ static int devfreq_qcom_qoslat_probe(struct platform_device *pdev)
 		dev_err(dev, "Failed to get mailbox channel: %d\n", ret);
 		return ret;
 	}
-	d->qos_level = 0;
+	d->qos_level = QOS_LEVEL_OFF;
 
 	p = &d->profile;
 	p->target = dev_target;
diff --git a/drivers/devfreq/governor_staticmap.c b/drivers/devfreq/governor_staticmap.c
new file mode 100644
index 0000000..34ab108
--- /dev/null
+++ b/drivers/devfreq/governor_staticmap.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "governor-static-map: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+
+struct core_dev_map {
+	unsigned int core_mhz;
+	unsigned int target_freq;
+};
+
+struct static_map_gov {
+	struct device		*dev;
+	struct device_node	*of_node;
+	struct clk		*dev_clk;
+	unsigned long		dev_clk_cur_freq;
+	struct notifier_block	clock_change_nb;
+	struct core_dev_map	*freq_map;
+	struct devfreq_governor	*gov;
+	struct devfreq		*df;
+	bool			mon_started;
+	struct list_head	list;
+	void			*orig_data;
+	unsigned long		resume_freq;
+};
+
+static LIST_HEAD(static_map_list);
+static DEFINE_MUTEX(static_map_lock);
+static DEFINE_MUTEX(state_lock);
+static int static_use_cnt;
+
+static struct static_map_gov *find_static_map_node(struct devfreq *df)
+{
+	struct static_map_gov *node, *found = NULL;
+
+	mutex_lock(&static_map_lock);
+	list_for_each_entry(node, &static_map_list, list)
+		if (node->of_node == df->dev.parent->of_node) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&static_map_lock);
+
+	return found;
+}
+
+static unsigned long core_to_dev_freq(struct static_map_gov *d,
+		unsigned long coref)
+{
+	struct core_dev_map *map = d->freq_map;
+	unsigned long freq = 0;
+
+	if (!map || !coref)
+		goto out;
+
+	/* Skip the zero entry; exact match or fall back to the last entry */
+	map++;
+	while (map->core_mhz && map->core_mhz != coref)
+		map++;
+	if (!map->core_mhz)
+		map--;
+	freq = map->target_freq;
+
+out:
+	pr_debug("core freq: %lu -> target: %lu\n", coref, freq);
+	return freq;
+}
+
+#define NUM_COLS	2
+static struct core_dev_map *init_core_dev_map(struct device *dev,
+					struct device_node *of_node,
+					char *prop_name)
+{
+	int len, nf, i, j;
+	u32 data;
+	struct core_dev_map *tbl;
+	int ret;
+
+	if (!of_node)
+		of_node = dev->of_node;
+
+	if (!of_find_property(of_node, prop_name, &len))
+		return NULL;
+	len /= sizeof(data);
+
+	if (len % NUM_COLS || len == 0)
+		return NULL;
+	nf = len / NUM_COLS;
+
+	tbl = devm_kzalloc(dev, (nf + 1) * sizeof(struct core_dev_map),
+			GFP_KERNEL);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0, j = 0; i < nf; i++, j += 2) {
+		ret = of_property_read_u32_index(of_node, prop_name, j,
+				&data);
+		if (ret) {
+			dev_err(dev,
+				"Couldn't read the core-dev freq table %d\n",
+									ret);
+			return NULL;
+		}
+		tbl[i].core_mhz = data;
+
+		ret = of_property_read_u32_index(of_node, prop_name, j + 1,
+				&data);
+		if (ret) {
+			dev_err(dev,
+				"Couldn't read the core-dev freq table %d\n",
+									ret);
+			return NULL;
+		}
+		tbl[i].target_freq = data;
+		pr_debug("Entry%d DEV:%u, Target:%u\n", i, tbl[i].core_mhz,
+				tbl[i].target_freq);
+	}
+	tbl[i].core_mhz = 0;
+
+	return tbl;
+}
+
+static int devfreq_static_map_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	struct static_map_gov *gov_node = df->data;
+
+	*freq = core_to_dev_freq(gov_node, gov_node->dev_clk_cur_freq);
+
+	return 0;
+}
+
+static int devfreq_clock_change_notify_cb(struct notifier_block *nb,
+				       unsigned long action, void *ptr)
+{
+	struct clk_notifier_data *data = ptr;
+	struct static_map_gov *d;
+	int ret;
+
+	if (action != POST_RATE_CHANGE)
+		return NOTIFY_OK;
+
+	mutex_lock(&state_lock);
+	d = container_of(nb, struct static_map_gov, clock_change_nb);
+
+	mutex_lock(&d->df->lock);
+	d->dev_clk_cur_freq = data->new_rate;
+	if (IS_ERR_VALUE(d->dev_clk_cur_freq)) {
+		mutex_unlock(&d->df->lock);
+		mutex_unlock(&state_lock);
+		return d->dev_clk_cur_freq;
+	}
+	d->dev_clk_cur_freq = d->dev_clk_cur_freq / 1000;
+
+	ret = update_devfreq(d->df);
+	if (ret)
+		dev_err(d->dev,
+			"Unable to update freq on request %d\n", ret);
+	mutex_unlock(&d->df->lock);
+	mutex_unlock(&state_lock);
+
+	return 0;
+}
+
+static int devfreq_static_map_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret = 0;
+	struct static_map_gov *gov_node;
+
+	mutex_lock(&state_lock);
+	gov_node = find_static_map_node(df);
+	if (!gov_node) {
+		mutex_unlock(&state_lock);
+		dev_err(df->dev.parent,
+				"Unable to find static map governor!\n");
+		return -ENODEV;
+	}
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		gov_node->clock_change_nb.notifier_call =
+						devfreq_clock_change_notify_cb;
+		gov_node->orig_data = df->data;
+		gov_node->df = df;
+		df->data = gov_node;
+		ret = clk_notifier_register(gov_node->dev_clk,
+					&gov_node->clock_change_nb);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Failed to register clock change notifier %d\n",
+									ret);
+		}
+		break;
+	case DEVFREQ_GOV_STOP:
+		ret = clk_notifier_unregister(gov_node->dev_clk,
+						&gov_node->clock_change_nb);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Failed to unregister clock change notifier %d\n",
+									ret);
+		}
+		df->data = gov_node->orig_data;
+		gov_node->orig_data = NULL;
+		break;
+	case DEVFREQ_GOV_SUSPEND:
+		ret = clk_notifier_unregister(gov_node->dev_clk,
+						&gov_node->clock_change_nb);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Failed to unregister clk notifier %d\n", ret);
+		}
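+		/* Drop the devfreq vote while suspended; restored on resume */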
+		mutex_lock(&df->lock);
+		gov_node->resume_freq = gov_node->dev_clk_cur_freq;
+		gov_node->dev_clk_cur_freq = 0;
+		update_devfreq(df);
+		mutex_unlock(&df->lock);
+		break;
+	case DEVFREQ_GOV_RESUME:
+		ret = clk_notifier_register(gov_node->dev_clk,
+						&gov_node->clock_change_nb);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Failed to register clock change notifier %d\n",
+									ret);
+		}
+		mutex_lock(&df->lock);
+		gov_node->dev_clk_cur_freq = gov_node->resume_freq;
+		update_devfreq(df);
+		gov_node->resume_freq = 0;
+		mutex_unlock(&df->lock);
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&state_lock);
+	return ret;
+}
+
+static struct devfreq_governor devfreq_gov_static_map = {
+	.name = "static_map",
+	.get_target_freq = devfreq_static_map_get_freq,
+	.event_handler = devfreq_static_map_ev_handler,
+};
+
+static int gov_static_map_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct static_map_gov *d;
+	int ret;
+	const char *dev_clk_name;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	d->dev = dev;
+
+	ret = of_property_read_string(dev->of_node, "qcom,dev_clk",
+							&dev_clk_name);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Failed to read device clock name %d\n", ret);
+		return ret;
+	}
+	d->dev_clk = devm_clk_get(dev, dev_clk_name);
+	if (IS_ERR(d->dev_clk))
+		return PTR_ERR(d->dev_clk);
+
+	d->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!d->of_node) {
+		dev_err(dev, "Couldn't find a target device.\n");
+		ret = -ENODEV;
+		return ret;
+	}
+
+	d->freq_map = init_core_dev_map(dev, NULL, "qcom,core-dev-table");
+	if (!d->freq_map) {
+		dev_err(dev, "Couldn't find the core-dev freq table!\n");
+		return -EINVAL;
+	}
+	mutex_lock(&static_map_lock);
+	list_add_tail(&d->list, &static_map_list);
+	mutex_unlock(&static_map_lock);
+
+	mutex_lock(&state_lock);
+	d->gov = &devfreq_gov_static_map;
+	if (!static_use_cnt)
+		ret = devfreq_add_governor(&devfreq_gov_static_map);
+	if (ret)
+		dev_err(dev, "Failed to add governor %d\n", ret);
+	if (!ret)
+		static_use_cnt++;
+	mutex_unlock(&state_lock);
+
+	return ret;
+}
+
+static const struct of_device_id static_map_match_table[] = {
+	{ .compatible = "qcom,static-map"},
+	{}
+};
+
+static struct platform_driver gov_static_map_driver = {
+	.probe = gov_static_map_probe,
+	.driver = {
+		.name = "static-map",
+		.of_match_table = static_map_match_table,
+		.suppress_bind_attrs = true,
+	},
+};
+
+module_platform_driver(gov_static_map_driver);
+MODULE_DESCRIPTION("STATIC MAP GOVERNOR FOR DDR");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index c59d2ee..0676807 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -486,11 +486,11 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
 {
 	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
 	struct dev_pm_opp *opp;
-	unsigned long rate = *freq * KHZ;
+	unsigned long rate;
 
-	opp = devfreq_recommended_opp(dev, &rate, flags);
+	opp = devfreq_recommended_opp(dev, freq, flags);
 	if (IS_ERR(opp)) {
-		dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
+		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
 		return PTR_ERR(opp);
 	}
 	rate = dev_pm_opp_get_freq(opp);
@@ -499,8 +499,6 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
 	clk_set_min_rate(tegra->emc_clock, rate);
 	clk_set_rate(tegra->emc_clock, 0);
 
-	*freq = rate;
-
 	return 0;
 }
 
@@ -510,7 +508,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
 	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
 	struct tegra_devfreq_device *actmon_dev;
 
-	stat->current_frequency = tegra->cur_freq;
+	stat->current_frequency = tegra->cur_freq * KHZ;
 
 	/* To be used by the tegra governor */
 	stat->private_data = tegra;
@@ -565,7 +563,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
 		target_freq = max(target_freq, dev->target_freq);
 	}
 
-	*freq = target_freq;
+	*freq = target_freq * KHZ;
 
 	return 0;
 }
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 31e9fc1..c23b191 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -447,6 +447,83 @@ struct gpi_dev {
 	struct dentry *dentry;
 };
 
+static struct gpi_dev *gpi_dev_dbg;
+
+struct reg_info {
+	char *name;
+	u32 offset;
+	u32 val;
+};
+
+static const struct reg_info gpi_debug_ev_cntxt[] = {
+	{ "CONFIG", CNTXT_0_CONFIG },
+	{ "R_LENGTH", CNTXT_1_R_LENGTH },
+	{ "BASE_LSB", CNTXT_2_RING_BASE_LSB },
+	{ "BASE_MSB", CNTXT_3_RING_BASE_MSB },
+	{ "RP_LSB", CNTXT_4_RING_RP_LSB },
+	{ "RP_MSB", CNTXT_5_RING_RP_MSB },
+	{ "WP_LSB", CNTXT_6_RING_WP_LSB },
+	{ "WP_MSB", CNTXT_7_RING_WP_MSB },
+	{ "INT_MOD", CNTXT_8_RING_INT_MOD },
+	{ "INTVEC", CNTXT_9_RING_INTVEC },
+	{ "MSI_LSB", CNTXT_10_RING_MSI_LSB },
+	{ "MSI_MSB", CNTXT_11_RING_MSI_MSB },
+	{ "RP_UPDATE_LSB", CNTXT_12_RING_RP_UPDATE_LSB },
+	{ "RP_UPDATE_MSB", CNTXT_13_RING_RP_UPDATE_MSB },
+	{ NULL },
+};
+
+static const struct reg_info gpi_debug_ch_cntxt[] = {
+	{ "CONFIG", CNTXT_0_CONFIG },
+	{ "R_LENGTH", CNTXT_1_R_LENGTH },
+	{ "BASE_LSB", CNTXT_2_RING_BASE_LSB },
+	{ "BASE_MSB", CNTXT_3_RING_BASE_MSB },
+	{ "RP_LSB", CNTXT_4_RING_RP_LSB },
+	{ "RP_MSB", CNTXT_5_RING_RP_MSB },
+	{ "WP_LSB", CNTXT_6_RING_WP_LSB },
+	{ "WP_MSB", CNTXT_7_RING_WP_MSB },
+	{ NULL },
+};
+
+static const struct reg_info gpi_debug_regs[] = {
+	{ "DEBUG_PC", GPI_DEBUG_PC_FOR_DEBUG },
+	{ "SW_RF_10", GPI_DEBUG_SW_RF_n_READ(10) },
+	{ "SW_RF_11", GPI_DEBUG_SW_RF_n_READ(11) },
+	{ "SW_RF_12", GPI_DEBUG_SW_RF_n_READ(12) },
+	{ "SW_RF_21", GPI_DEBUG_SW_RF_n_READ(21) },
+	{ NULL },
+};
+
+static const struct reg_info gpi_debug_qsb_regs[] = {
+	{ "QSB_LOG_SEL", GPI_DEBUG_QSB_LOG_SEL },
+	{ "QSB_LOG_CLR", GPI_DEBUG_QSB_LOG_CLR },
+	{ "QSB_LOG_ERR_TRNS_ID", GPI_DEBUG_QSB_LOG_ERR_TRNS_ID },
+	{ "QSB_LOG_0", GPI_DEBUG_QSB_LOG_0 },
+	{ "QSB_LOG_1", GPI_DEBUG_QSB_LOG_1 },
+	{ "QSB_LOG_2", GPI_DEBUG_QSB_LOG_2 },
+	{ "LAST_MISC_ID_0", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(0) },
+	{ "LAST_MISC_ID_1", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(1) },
+	{ "LAST_MISC_ID_2", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(2) },
+	{ "LAST_MISC_ID_3", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(3) },
+	{ NULL },
+};
+
+struct gpi_reg_table {
+	u64 timestamp;
+	struct reg_info *ev_cntxt_info;
+	struct reg_info *chan[MAX_CHANNELS_PER_GPII];
+	struct reg_info *gpi_debug_regs;
+	struct reg_info *gpii_cntxt;
+	struct reg_info *gpi_debug_qsb_regs;
+	u32 ev_scratch_0;
+	u32 ch_scratch_0[MAX_CHANNELS_PER_GPII];
+	void *ev_ring;
+	u32 ev_ring_len;
+	void *ch_ring[MAX_CHANNELS_PER_GPII];
+	u32 ch_ring_len[MAX_CHANNELS_PER_GPII];
+	u32 error_log;
+};
+
 struct gpii_chan {
 	struct virt_dma_chan vc;
 	u32 chid;
@@ -501,6 +578,9 @@ struct gpii {
 	atomic_t dbg_index;
 	char label[GPI_LABEL_SIZE];
 	struct dentry *dentry;
+	struct gpi_reg_table dbg_reg_table;
+	bool reg_table_dump;
+	u32 dbg_gpi_irq_cnt;
 };
 
 struct gpi_desc {
@@ -610,6 +690,156 @@ static inline void gpi_write_reg_field(struct gpii *gpii,
 	gpi_write_reg(gpii, addr, val);
 }
 
+static void gpi_dump_debug_reg(struct gpii *gpii)
+{
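+	/*
+	 * Snapshot the GPII context, event and channel ring contexts, GPI
+	 * debug/QSB registers and copies of the rings for later inspection.
+	 */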
+	struct gpi_reg_table *dbg_reg_table = &gpii->dbg_reg_table;
+	struct reg_info *reg_info;
+	int chan;
+	const gfp_t gfp = GFP_ATOMIC;
+	const struct reg_info gpii_cntxt[] = {
+		{ "TYPE_IRQ", GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS
+					(gpii->gpii_id) },
+		{ "TYPE_IRQ_MSK", GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
+					(gpii->gpii_id) },
+		{ "CH_IRQ", GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS
+					(gpii->gpii_id) },
+		{ "EV_IRQ", GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS
+					(gpii->gpii_id) },
+		{ "CH_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
+					(gpii->gpii_id) },
+		{ "EV_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
+					(gpii->gpii_id) },
+		{ "IEOB_IRQ", GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS
+					(gpii->gpii_id) },
+		{ "IEOB_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
+					(gpii->gpii_id) },
+		{ "GLOB_IRQ", GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS
+					(gpii->gpii_id) },
+		{ NULL },
+	};
+
+	dbg_reg_table->timestamp = sched_clock();
+
+	if (!dbg_reg_table->gpii_cntxt) {
+		dbg_reg_table->gpii_cntxt = kzalloc(sizeof(gpii_cntxt), gfp);
+		if (!dbg_reg_table->gpii_cntxt)
+			return;
+		memcpy((void *)dbg_reg_table->gpii_cntxt, (void *)gpii_cntxt,
+		       sizeof(gpii_cntxt));
+	}
+
+	/* log gpii cntxt */
+	reg_info = dbg_reg_table->gpii_cntxt;
+	for (; reg_info->name; reg_info++)
+		reg_info->val = readl_relaxed(gpii->regs + reg_info->offset);
+
+	if (!dbg_reg_table->ev_cntxt_info) {
+		dbg_reg_table->ev_cntxt_info =
+			kzalloc(sizeof(gpi_debug_ev_cntxt), gfp);
+		if (!dbg_reg_table->ev_cntxt_info)
+			return;
+		memcpy((void *)dbg_reg_table->ev_cntxt_info,
+			(void *)gpi_debug_ev_cntxt, sizeof(gpi_debug_ev_cntxt));
+	}
+
+	/* log ev cntxt */
+	reg_info = dbg_reg_table->ev_cntxt_info;
+	for (; reg_info->name; reg_info++)
+		reg_info->val = readl_relaxed(gpii->ev_cntxt_base_reg +
+					      reg_info->offset);
+
+	/* dump channel cntxt registers */
+	for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
+		if (!dbg_reg_table->chan[chan]) {
+			dbg_reg_table->chan[chan] =
+				kzalloc(sizeof(gpi_debug_ch_cntxt), gfp);
+			if (!dbg_reg_table->chan[chan])
+				return;
+			memcpy((void *)dbg_reg_table->chan[chan],
+				(void *)gpi_debug_ch_cntxt,
+				sizeof(gpi_debug_ch_cntxt));
+		}
+		reg_info = dbg_reg_table->chan[chan];
+		for (; reg_info->name; reg_info++)
+			reg_info->val =
+			readl_relaxed(
+			gpii->gpii_chan[chan].ch_cntxt_base_reg +
+							reg_info->offset);
+	}
+
+	if (!dbg_reg_table->gpi_debug_regs) {
+		dbg_reg_table->gpi_debug_regs =
+			kzalloc(sizeof(gpi_debug_regs), gfp);
+		if (!dbg_reg_table->gpi_debug_regs)
+			return;
+		memcpy((void *)dbg_reg_table->gpi_debug_regs,
+			(void *)gpi_debug_regs, sizeof(gpi_debug_regs));
+	}
+
+	/* log debug register */
+	reg_info = dbg_reg_table->gpi_debug_regs;
+	for (; reg_info->name; reg_info++)
+		reg_info->val = readl_relaxed(gpii->gpi_dev->regs +
+					reg_info->offset);
+
+	if (!dbg_reg_table->gpi_debug_qsb_regs) {
+		dbg_reg_table->gpi_debug_qsb_regs =
+			kzalloc(sizeof(gpi_debug_qsb_regs), gfp);
+		if (!dbg_reg_table->gpi_debug_qsb_regs)
+			return;
+		memcpy((void *)dbg_reg_table->gpi_debug_qsb_regs,
+			(void *)gpi_debug_qsb_regs,
+				sizeof(gpi_debug_qsb_regs));
+	}
+
+	/* log QSB register */
+	reg_info = dbg_reg_table->gpi_debug_qsb_regs;
+	for (; reg_info->name; reg_info++)
+		reg_info->val = readl_relaxed(gpii->gpi_dev->regs +
+					reg_info->offset);
+
+	/* dump scratch registers */
+	dbg_reg_table->ev_scratch_0 = readl_relaxed(gpii->regs +
+			GPI_GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id));
+	for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++)
+		dbg_reg_table->ch_scratch_0[chan] = readl_relaxed(gpii->regs +
+				GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
+						gpii->gpii_chan[chan].chid));
+
+	/* Copy the ev ring */
+	if (!dbg_reg_table->ev_ring) {
+		dbg_reg_table->ev_ring_len = gpii->ev_ring.len;
+		dbg_reg_table->ev_ring =
+				kzalloc(dbg_reg_table->ev_ring_len, gfp);
+		if (!dbg_reg_table->ev_ring)
+			return;
+	}
+	memcpy(dbg_reg_table->ev_ring, gpii->ev_ring.base,
+		dbg_reg_table->ev_ring_len);
+
+	/* Copy Transfer rings */
+	for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
+		struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
+
+		if (!dbg_reg_table->ch_ring[chan]) {
+			dbg_reg_table->ch_ring_len[chan] =
+					gpii_chan->ch_ring.len;
+			dbg_reg_table->ch_ring[chan] =
+				kzalloc(dbg_reg_table->ch_ring_len[chan], gfp);
+			if (!dbg_reg_table->ch_ring[chan])
+				return;
+		}
+
+		memcpy(dbg_reg_table->ch_ring[chan], gpii_chan->ch_ring.base,
+		       dbg_reg_table->ch_ring_len[chan]);
+	}
+
+	dbg_reg_table->error_log = readl_relaxed(gpii->regs +
+				GPI_GPII_n_ERROR_LOG_OFFS(gpii->gpii_id));
+
+	GPII_ERR(gpii, GPI_DBG_COMMON, "Debug register dump complete\n");
+}
+
 static void gpi_disable_interrupts(struct gpii *gpii)
 {
 	struct {
@@ -996,6 +1226,37 @@ static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
 	}
 }
 
+/* processing gpi general error interrupts */
+static void gpi_process_gen_err_irq(struct gpii *gpii)
+{
+	u32 gpii_id = gpii->gpii_id;
+	u32 offset = GPI_GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id);
+	u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
+	u32 chid;
+	struct gpii_chan *gpii_chan;
+
+	/* log the error status (cleared below) */
+	GPII_ERR(gpii, GPI_DBG_COMMON, "irq_stts:0x%x\n", irq_stts);
+
+	/* Notify the client about error */
+	for (chid = 0, gpii_chan = gpii->gpii_chan;
+	     chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
+		if (gpii_chan->client_info.callback)
+			gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
+					      irq_stts);
+
+	/* Clear the register */
+	offset = GPI_GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id);
+	gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
+
+	gpii->dbg_gpi_irq_cnt++;
+
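+	/* Capture the debug register snapshot only for the first such error */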
+	if (!gpii->reg_table_dump) {
+		gpi_dump_debug_reg(gpii);
+		gpii->reg_table_dump = true;
+	}
+}
+
 /* processing gpi level error interrupts */
 static void gpi_process_glob_err_irq(struct gpii *gpii)
 {
@@ -1151,6 +1412,7 @@ static irqreturn_t gpi_handle_irq(int irq, void *data)
 		if (type) {
 			GPII_CRITIC(gpii, GPI_DBG_COMMON,
 				 "Unhandled interrupt status:0x%x\n", type);
+			gpi_process_gen_err_irq(gpii);
 			goto exit_irq;
 		}
 		offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
@@ -2544,6 +2806,9 @@ static int gpi_probe(struct platform_device *pdev)
 	if (!gpi_dev)
 		return -ENOMEM;
 
+	/* debug purpose */
+	gpi_dev_dbg = gpi_dev;
+
 	gpi_dev->dev = &pdev->dev;
 	gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
 	gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
diff --git a/drivers/dma/qcom/msm_gpi_mmio.h b/drivers/dma/qcom/msm_gpi_mmio.h
index acd74df..46ed27e 100644
--- a/drivers/dma/qcom/msm_gpi_mmio.h
+++ b/drivers/dma/qcom/msm_gpi_mmio.h
@@ -216,3 +216,15 @@ enum CNTXT_OFFS {
 #define GPI_GPII_n_CH_k_SCRATCH_3_OFFS(n, k) \
 	(0x2006C + (0x4000 * (n)) + (0x80 * (k)))
 
+/* Debug registers */
+#define GPI_DEBUG_PC_FOR_DEBUG (0x5048)
+#define GPI_DEBUG_SW_RF_n_READ(n) (0x5100 + (0x4 * (n)))
+
+/* GPI_DEBUG_QSB registers */
+#define GPI_DEBUG_QSB_LOG_SEL (0x5050)
+#define GPI_DEBUG_QSB_LOG_CLR (0x5058)
+#define GPI_DEBUG_QSB_LOG_ERR_TRNS_ID (0x5060)
+#define GPI_DEBUG_QSB_LOG_0 (0x5064)
+#define GPI_DEBUG_QSB_LOG_1 (0x5068)
+#define GPI_DEBUG_QSB_LOG_2 (0x506C)
+#define GPI_DEBUG_QSB_LOG_LAST_MISC_ID(n) (0x5070 + (0x4 * (n)))
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 473aeec..574bce6 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -532,7 +532,11 @@ void ghes_edac_unregister(struct ghes *ghes)
 	if (!ghes_pvt)
 		return;
 
+	if (atomic_dec_return(&ghes_init))
+		return;
+
 	mci = ghes_pvt->mci;
+	ghes_pvt = NULL;
 	edac_mc_del_mc(mci->pdev);
 	edac_mc_free(mci);
 }
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2a29dd9..d54fca9 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -281,6 +281,9 @@ static __init int efivar_ssdt_load(void)
 	void *data;
 	int ret;
 
+	if (!efivar_ssdt[0])
+		return 0;
+
 	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
 
 	list_for_each_entry_safe(entry, aux, &entries, list) {
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index e75abe9..6c7ab2b 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -62,7 +62,7 @@ static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
 	if (max_len - consumed < *entry_len)
 		return VPD_FAIL;
 
-	consumed += decoded_len;
+	consumed += *entry_len;
 	*_consumed = consumed;
 	return VPD_OK;
 }
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 6cf2e2c..4935cda 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -529,11 +529,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip)
 		}
 
 		for_each_set_bit(n, &reg, SPRD_EIC_PER_BANK_NR) {
-			girq = irq_find_mapping(chip->irq.domain,
-					bank * SPRD_EIC_PER_BANK_NR + n);
+			u32 offset = bank * SPRD_EIC_PER_BANK_NR + n;
+
+			girq = irq_find_mapping(chip->irq.domain, offset);
 
 			generic_handle_irq(girq);
-			sprd_eic_toggle_trigger(chip, girq, n);
+			sprd_eic_toggle_trigger(chip, girq, offset);
 		}
 	}
 }
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 7c3b73b..bbc58021 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2656,8 +2656,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 		if (!ret)
 			goto set_output_value;
 		/* Emulate open drain by not actively driving the line high */
-		if (value)
-			return gpiod_direction_input(desc);
+		if (value) {
+			ret = gpiod_direction_input(desc);
+			goto set_output_flag;
+		}
 	}
 	else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
 		ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
@@ -2665,8 +2667,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 		if (!ret)
 			goto set_output_value;
 		/* Emulate open source by not actively driving the line low */
-		if (!value)
-			return gpiod_direction_input(desc);
+		if (!value) {
+			ret = gpiod_direction_input(desc);
+			goto set_output_flag;
+		}
 	} else {
 		gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
 					    PIN_CONFIG_DRIVE_PUSH_PULL);
@@ -2674,6 +2678,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 
 set_output_value:
 	return gpiod_direction_output_raw_commit(desc, value);
+
+set_output_flag:
+	/*
+	 * When emulating open-source or open-drain functionalities by not
+	 * actively driving the line (setting the mode to input), we still
+	 * need to set the IS_OUT flag; otherwise we won't be able to set the
+	 * line value anymore.
+	 */
+	if (ret == 0)
+		set_bit(FLAG_IS_OUT, &desc->flags);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(gpiod_direction_output);
 
@@ -2987,8 +3002,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
 
 	if (value) {
 		err = chip->direction_input(chip, offset);
-		if (!err)
-			clear_bit(FLAG_IS_OUT, &desc->flags);
 	} else {
 		err = chip->direction_output(chip, offset, 0);
 		if (!err)
@@ -3018,8 +3031,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
 			set_bit(FLAG_IS_OUT, &desc->flags);
 	} else {
 		err = chip->direction_input(chip, offset);
-		if (!err)
-			clear_bit(FLAG_IS_OUT, &desc->flags);
 	}
 	trace_gpio_direction(desc_to_gpio(desc), !value, err);
 	if (err < 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b40e9c7..5e29f14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -841,6 +841,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	if (ret == -EPROBE_DEFER)
 		return ret;
 
+#ifdef CONFIG_DRM_AMDGPU_SI
+	if (!amdgpu_si_support) {
+		switch (flags & AMD_ASIC_MASK) {
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			dev_info(&pdev->dev,
+				 "SI support provided by radeon.\n");
+			dev_info(&pdev->dev,
+				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+				);
+			return -ENODEV;
+		}
+	}
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	if (!amdgpu_cik_support) {
+		switch (flags & AMD_ASIC_MASK) {
+		case CHIP_KAVERI:
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_KABINI:
+		case CHIP_MULLINS:
+			dev_info(&pdev->dev,
+				 "CIK support provided by radeon.\n");
+			dev_info(&pdev->dev,
+				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+				);
+			return -ENODEV;
+		}
+	}
+#endif
+
 	/* Get rid of things like offb */
 	ret = amdgpu_kick_out_firmware_fb(pdev);
 	if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 51b5e977..f4e9d1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -139,7 +139,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	/* ring tests don't use a job */
 	if (job) {
 		vm = job->vm;
-		fence_ctx = job->base.s_fence->scheduled.context;
+		fence_ctx = job->base.s_fence ?
+			job->base.s_fence->scheduled.context : 0;
 	} else {
 		vm = NULL;
 		fence_ctx = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c0396e8..ba10577 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -87,41 +87,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	struct amdgpu_device *adev;
 	int r, acpi_status;
 
-#ifdef CONFIG_DRM_AMDGPU_SI
-	if (!amdgpu_si_support) {
-		switch (flags & AMD_ASIC_MASK) {
-		case CHIP_TAHITI:
-		case CHIP_PITCAIRN:
-		case CHIP_VERDE:
-		case CHIP_OLAND:
-		case CHIP_HAINAN:
-			dev_info(dev->dev,
-				 "SI support provided by radeon.\n");
-			dev_info(dev->dev,
-				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
-				);
-			return -ENODEV;
-		}
-	}
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-	if (!amdgpu_cik_support) {
-		switch (flags & AMD_ASIC_MASK) {
-		case CHIP_KAVERI:
-		case CHIP_BONAIRE:
-		case CHIP_HAWAII:
-		case CHIP_KABINI:
-		case CHIP_MULLINS:
-			dev_info(dev->dev,
-				 "CIK support provided by radeon.\n");
-			dev_info(dev->dev,
-				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
-				);
-			return -ENODEV;
-		}
-	}
-#endif
-
 	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
 	if (adev == NULL) {
 		return -ENOMEM;
@@ -562,6 +527,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
 			sh_num = 0xffffffff;
 
+		if (info->read_mmr_reg.count > 128)
+			return -EINVAL;
+
 		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
 		if (!regs)
 			return -ENOMEM;
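
The added count check rejects oversized user requests before kmalloc_array() runs. A minimal user-space sketch of the same validate-then-allocate pattern; the 128-entry cap matches the hunk above, everything else (names, helper) is illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_MMR_COUNT 128	/* illustrative cap, mirroring the 128-register limit above */

/* Allocate a scratch buffer for 'count' 32-bit registers, rejecting oversized requests. */
static int alloc_reg_buffer(uint32_t count, uint32_t **out)
{
	if (count == 0 || count > MAX_MMR_COUNT)
		return -EINVAL;	/* reject before the size calculation can grow unchecked */

	*out = calloc(count, sizeof(**out));
	if (!*out)
		return -ENOMEM;

	return 0;
}

int main(void)
{
	uint32_t *regs = NULL;

	printf("count=4:   %d\n", alloc_reg_buffer(4, &regs));
	free(regs);
	printf("count=999: %d\n", alloc_reg_buffer(999, &regs));
	return 0;
}
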
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 650554a..1c1fee0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -174,6 +174,9 @@ static const struct edid_quirk {
 	/* Medion MD 30217 PG */
 	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 
+	/* Lenovo G50 */
+	{ "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
 	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
 	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 663a7c9..d0e216d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1276,9 +1276,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
@@ -1297,7 +1294,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_workload *last_workload = NULL;
 	struct intel_vgpu_workload *workload = NULL;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
@@ -1320,15 +1317,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
+	list_for_each_entry_reverse(last_workload, q, list) {
+
+		if (same_context(&last_workload->ctx_desc, desc)) {
+			gvt_dbg_el("ring id %d cur workload == last\n",
+					ring_id);
+			gvt_dbg_el("ctx head %x real head %lx\n", head,
+					last_workload->rb_tail);
+			/*
+			 * cannot use guest context head pointer here,
+			 * as it might not be updated at this time
+			 */
+			head = last_workload->rb_tail;
+			break;
+		}
 	}
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 96fb5f6..cc4ea55 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -429,15 +429,15 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
 	}
 
 	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
-	if (!msm_host->byte_clk_src) {
-		ret = -ENODEV;
+	if (IS_ERR(msm_host->byte_clk_src)) {
+		ret = PTR_ERR(msm_host->byte_clk_src);
 		pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
 		goto exit;
 	}
 
 	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
-	if (!msm_host->pixel_clk_src) {
-		ret = -ENODEV;
+	if (IS_ERR(msm_host->pixel_clk_src)) {
+		ret = PTR_ERR(msm_host->pixel_clk_src);
 		pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
 		goto exit;
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 5e01bfb..10107e5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1517,7 +1517,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 			nv_encoder->aux = aux;
 		}
 
-		if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
+		if (nv_connector->type != DCB_CONNECTOR_eDP &&
+		    (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
 		    ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
 			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
 					    nv_connector->base.base.id,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index cb80dda..7e9e2f0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1110,7 +1110,7 @@ static const struct dss_features omap34xx_dss_feats = {
 
 static const struct dss_features omap3630_dss_feats = {
 	.model			=	DSS_MODEL_OMAP3,
-	.fck_div_max		=	32,
+	.fck_div_max		=	31,
 	.fck_freq_max		=	173000000,
 	.dss_fck_multiplier	=	1,
 	.parent_clk_name	=	"dpll4_ck",
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 25b5407..c26f09b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -340,8 +340,39 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 static int radeon_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
+	unsigned long flags = 0;
 	int ret;
 
+	if (!ent)
+		return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */
+
+	flags = ent->driver_data;
+
+	if (!radeon_si_support) {
+		switch (flags & RADEON_FAMILY_MASK) {
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			dev_info(&pdev->dev,
+				 "SI support disabled by module param\n");
+			return -ENODEV;
+		}
+	}
+	if (!radeon_cik_support) {
+		switch (flags & RADEON_FAMILY_MASK) {
+		case CHIP_KAVERI:
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_KABINI:
+		case CHIP_MULLINS:
+			dev_info(&pdev->dev,
+				 "CIK support disabled by module param\n");
+			return -ENODEV;
+		}
+	}
+
 	if (vga_switcheroo_client_probe_defer(pdev))
 		return -EPROBE_DEFER;
 
@@ -364,19 +395,11 @@ radeon_pci_remove(struct pci_dev *pdev)
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
-	struct drm_device *ddev = pci_get_drvdata(pdev);
-
 	/* if we are running in a VM, make sure the device
 	 * torn down properly on reboot/shutdown
 	 */
 	if (radeon_device_is_virtual())
 		radeon_pci_remove(pdev);
-
-	/* Some adapters need to be suspended before a
-	* shutdown occurs in order to prevent an error
-	* during kexec.
-	*/
-	radeon_suspend_kms(ddev, true, true, false);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 6a8fb6f..3ff8357 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -95,31 +95,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	struct radeon_device *rdev;
 	int r, acpi_status;
 
-	if (!radeon_si_support) {
-		switch (flags & RADEON_FAMILY_MASK) {
-		case CHIP_TAHITI:
-		case CHIP_PITCAIRN:
-		case CHIP_VERDE:
-		case CHIP_OLAND:
-		case CHIP_HAINAN:
-			dev_info(dev->dev,
-				 "SI support disabled by module param\n");
-			return -ENODEV;
-		}
-	}
-	if (!radeon_cik_support) {
-		switch (flags & RADEON_FAMILY_MASK) {
-		case CHIP_KAVERI:
-		case CHIP_BONAIRE:
-		case CHIP_HAWAII:
-		case CHIP_KABINI:
-		case CHIP_MULLINS:
-			dev_info(dev->dev,
-				 "CIK support disabled by module param\n");
-			return -ENODEV;
-		}
-	}
-
 	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
 	if (rdev == NULL) {
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6fe91c1..185655f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -273,15 +273,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		else
 			ret = vmf_insert_pfn(&cvma, address, pfn);
 
-		/*
-		 * Somebody beat us to this PTE or prefaulting to
-		 * an already populated PTE, or prefaulting error.
-		 */
-
-		if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
-			break;
-		else if (unlikely(ret & VM_FAULT_ERROR))
-			goto out_io_unlock;
+		/* Never error on prefaulted PTEs */
+		if (unlikely((ret & VM_FAULT_ERROR))) {
+			if (i == 0)
+				goto out_io_unlock;
+			else
+				break;
+		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
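
The reworked fault loop above only treats an insertion failure as fatal for the page that actually faulted (i == 0); failures while speculatively prefaulting just end the loop. A small stand-alone sketch of that control flow, with a stub insert_page() standing in for vmf_insert_pfn():

#include <stdio.h>

#define FAULT_ERROR    (-1)
#define PREFAULT_PAGES 16

/* Stand-in for vmf_insert_pfn(): pretend pages beyond index 9 cannot be inserted. */
static int insert_page(unsigned int index)
{
	return index > 9 ? FAULT_ERROR : 0;
}

/* Returns nonzero only when the page that actually faulted (index 0) fails. */
static int handle_fault_with_prefault(void)
{
	unsigned int i;

	for (i = 0; i < PREFAULT_PAGES; i++) {
		int ret = insert_page(i);

		/* Never error on prefaulted PTEs: only index 0 is fatal. */
		if (ret != 0) {
			if (i == 0)
				return ret;
			break;
		}
	}
	return 0;
}

int main(void)
{
	printf("fault handled, ret=%d\n", handle_fault_with_prefault());
	return 0;
}
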
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
index 2b0d75115..b0e45d5 100644
--- a/drivers/gpu/msm/Kconfig
+++ b/drivers/gpu/msm/Kconfig
@@ -2,11 +2,11 @@
 config QCOM_KGSL
 	tristate "Qualcomm Technologies, Inc. 3D Graphics driver"
 	depends on ARCH_QCOM
+	depends on QCOM_QFPROM
 	select GENERIC_ALLOCATOR
 	select FW_LOADER
 	select PM_DEVFREQ
 	select QCOM_SCM
-	select NVMEM
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	select DEVFREQ_GOV_PERFORMANCE
 	select DEVFREQ_GOV_QCOM_ADRENO_TZ
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 3a62933..8337f64 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1337,6 +1337,23 @@ static int adreno_read_speed_bin(struct platform_device *pdev,
 	return 0;
 }
 
+static int adreno_probe_efuse(struct platform_device *pdev,
+					struct adreno_device *adreno_dev)
+{
+	int ret;
+
+	ret = adreno_read_speed_bin(pdev, adreno_dev);
+	if (ret)
+		return ret;
+
+	ret = nvmem_cell_read_u32(&pdev->dev, "isense_slope",
+					&adreno_dev->lm_slope);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	return 0;
+}
+
 static int adreno_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
@@ -1364,7 +1381,7 @@ static int adreno_probe(struct platform_device *pdev)
 
 	adreno_update_soc_hw_revision_quirks(adreno_dev, pdev);
 
-	status = adreno_read_speed_bin(pdev, adreno_dev);
+	status = adreno_probe_efuse(pdev, adreno_dev);
 	if (status)
 		return status;
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index b50ae46..4d0569c 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -443,6 +443,7 @@ enum gpu_coresight_sources {
  * @lm_limit: limiting value for LM
  * @lm_threshold_count: register value for counter for lm threshold breakin
  * @lm_threshold_cross: number of current peaks exceeding threshold
+ * @lm_slope: Slope value in the fused register for LM
  * @ifpc_count: Number of times the GPU went into IFPC
  * @speed_bin: Indicate which power level set to use
  * @highest_bank_bit: Value of the highest bank bit
@@ -518,6 +519,7 @@ struct adreno_device {
 	uint32_t lm_limit;
 	uint32_t lm_threshold_count;
 	uint32_t lm_threshold_cross;
+	u32 lm_slope;
 	uint32_t ifpc_count;
 
 	unsigned int speed_bin;
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index d7ca832d..864d103 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -644,6 +644,29 @@ static int hfi_verify_fw_version(struct kgsl_device *device,
 	return 0;
 }
 
+static int hfi_send_lm_feature_ctrl(struct gmu_device *gmu,
+		struct adreno_device *adreno_dev)
+{
+	struct hfi_set_value_cmd req = {
+		.type = HFI_VALUE_LM_CS0,
+		.subtype = 0,
+		.data = adreno_dev->lm_slope,
+	};
+	struct kgsl_device *device = &adreno_dev->dev;
+	int ret;
+
+	if (!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
+		return 0;
+
+	ret = hfi_send_feature_ctrl(gmu, HFI_FEATURE_LM, 1,
+			device->pwrctrl.throttle_mask);
+
+	if (!ret)
+		ret = hfi_send_req(gmu, H2F_MSG_SET_VALUE, &req);
+
+	return ret;
+}
+
 static int hfi_send_acd_feature_ctrl(struct gmu_device *gmu,
 		struct adreno_device *adreno_dev)
 {
@@ -723,12 +746,9 @@ int hfi_start(struct kgsl_device *device,
 		if (result)
 			return result;
 
-		if (test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
-			result = hfi_send_feature_ctrl(gmu, HFI_FEATURE_LM, 1,
-					device->pwrctrl.throttle_mask);
-			if (result)
-				return result;
-		}
+		result = hfi_send_lm_feature_ctrl(gmu, adreno_dev);
+		if (result)
+			return result;
 
 		result = hfi_send_core_fw_start(gmu);
 		if (result)
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 8a6175f..a6d054b 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -96,6 +96,7 @@ struct hfi_queue_table;
 #define HFI_VALUE_LOG_EVENT_ON		112
 #define HFI_VALUE_LOG_EVENT_OFF		113
 #define HFI_VALUE_DCVS_OBJ		114
+#define HFI_VALUE_LM_CS0		115
 
 #define HFI_VALUE_GLOBAL_TOKEN		0xFFFFFFFF
 
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 14c1c58..7077d5d 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -253,12 +253,20 @@ void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
 	if (pages == NULL || pcount == 0)
 		return;
 
+	if (WARN(!kern_addr_valid((unsigned long)pages),
+		"Address of pages=%pK is not valid\n", pages))
+		return;
+
 	for (i = 0; i < pcount;) {
 		/*
 		 * Free each page or compound page group individually.
 		 */
 		struct page *p = pages[i];
 
+		if (WARN(!kern_addr_valid((unsigned long)p),
+			"Address of page=%pK is not valid\n", p))
+			return;
+
 		i += 1 << compound_order(p);
 		kgsl_pool_free_page(p);
 	}
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index b157a60..0a22259 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1070,7 +1070,9 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
 		kvfree(memdesc->sgt);
 	}
 
+	memdesc->page_count = 0;
 	kvfree(memdesc->pages);
+	memdesc->pages = NULL;
 }
 EXPORT_SYMBOL(kgsl_sharedmem_free);
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 0ba4013..9e894c9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -176,6 +176,12 @@ static void etm4_enable_hw(void *info)
 	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
 		dev_err(drvdata->dev,
 			"timeout while waiting for Idle Trace Status\n");
+	/*
+	 * As recommended by section 4.3.7 ("Synchronization when using the
+	 * memory-mapped interface") of ARM IHI 0064D
+	 */
+	dsb(sy);
+	isb();
 
 	CS_LOCK(drvdata->base);
 
@@ -328,8 +334,12 @@ static void etm4_disable_hw(void *info)
 	/* EN, bit[0] Trace unit enable bit */
 	control &= ~0x1;
 
-	/* make sure everything completes before disabling */
-	mb();
+	/*
+	 * Make sure everything completes before disabling, as recommended
+	 * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+	 * SSTATUS") of ARM IHI 0064D
+	 */
+	dsb(sy);
 	isb();
 	writel_relaxed(control, drvdata->base + TRCPRGCTLR);
 
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 5ee653f..e1f77f6 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -39,6 +39,7 @@ do {									\
 #define TPDA_FLUSH_CR		(0x090)
 #define TPDA_FLUSH_SR		(0x094)
 #define TPDA_FLUSH_ERR		(0x098)
+#define TPDA_SPARE		(0xefc)
 
 #define TPDA_MAX_INPORTS	32
 
@@ -225,6 +226,59 @@ static const struct coresight_ops tpda_cs_ops = {
 	.link_ops	= &tpda_link_ops,
 };
 
+static ssize_t legacy_ts_mode_enable_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDA_UNLOCK(drvdata);
+	val = tpda_readl(drvdata, TPDA_SPARE);
+	TPDA_LOCK(drvdata);
+
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t legacy_ts_mode_enable_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDA_UNLOCK(drvdata);
+		val = tpda_readl(drvdata, TPDA_SPARE);
+		val = val | BIT(0);
+		tpda_writel(drvdata, val, TPDA_SPARE);
+		TPDA_LOCK(drvdata);
+	}
+
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR_RW(legacy_ts_mode_enable);
+
 static ssize_t trig_async_enable_show(struct device *dev,
 					   struct device_attribute *attr,
 					   char *buf)
@@ -553,6 +607,7 @@ static struct attribute *tpda_attrs[] = {
 	&dev_attr_global_flush_req.attr,
 	&dev_attr_port_flush_req.attr,
 	&dev_attr_cmbchan_mode.attr,
+	&dev_attr_legacy_ts_mode_enable.attr,
 	NULL,
 };
 
diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c
index b88425f..ee0f4a8 100644
--- a/drivers/i3c/master/i3c-master-qcom-geni.c
+++ b/drivers/i3c/master/i3c-master-qcom-geni.c
@@ -657,6 +657,10 @@ static int i3c_geni_runtime_get_mutex_lock(struct geni_i3c_dev *gi3c)
 	mutex_lock(&gi3c->lock);
 
 	reinit_completion(&gi3c->done);
+	if (!pm_runtime_enabled(gi3c->se.dev))
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+				"PM runtime disabled\n");
+
 	ret = pm_runtime_get_sync(gi3c->se.dev);
 	if (ret < 0) {
 		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
@@ -1204,8 +1208,16 @@ static int geni_i3c_master_bus_init(struct i3c_master_controller *m)
 	ret = i3c_master_set_info(&gi3c->ctrlr, &info);
 
 err_cleanup:
-	pm_runtime_mark_last_busy(gi3c->se.dev);
-	pm_runtime_put_autosuspend(gi3c->se.dev);
+	/* As the framework calls multiple exposed APIs after this one, we
+	 * cannot use the mutex-protected internal put/get sync APIs. Hence
+	 * forcefully disable clocks and decrement the usage count.
+	 */
+	disable_irq(gi3c->irq);
+	se_geni_resources_off(&gi3c->se.i3c_rsc);
+	pm_runtime_disable(gi3c->se.dev);
+	pm_runtime_put_noidle(gi3c->se.dev);
+	pm_runtime_set_suspended(gi3c->se.dev);
+	pm_runtime_enable(gi3c->se.dev);
 
 	return ret;
 }
@@ -1476,19 +1488,14 @@ static void qcom_geni_i3c_ibi_unconf(struct i3c_dev_desc *dev)
 	/* check if any IBI is enabled, if not then reset HW */
 	val = geni_read_reg(gi3c->se.ibi_base, IBI_GPII_IBI_EN);
 	if (!val) {
-		u32 wait = 100;
 
 		gi3c->ibi.err = 0;
 		reinit_completion(&gi3c->ibi.done);
 
 		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG);
-		val |= ~IBI_C_ENABLE;
+		val &= ~IBI_C_ENABLE;
 		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG);
 
-		/* enable ENABLE_CHANGE */
-		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
-		val |= ENABLE_CHANGE_IRQ_EN;
-		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
 
 		/* wait for ENABLE change */
 		timeout = wait_for_completion_timeout(&gi3c->ibi.done,
@@ -1506,54 +1513,9 @@ static void qcom_geni_i3c_ibi_unconf(struct i3c_dev_desc *dev)
 			return;
 		}
 
-		/* IBI_C reset */
-		geni_write_reg(1, gi3c->se.ibi_base, IBI_SW_RESET);
-		/*
-		 * wait for SW_RESET to be taken care by HW. Post reset it
-		 * will get cleared by HW
-		 */
-		while (wait--) {
-			if (geni_read_reg(gi3c->se.ibi_base, IBI_SW_RESET) != 0)
-				break;
-			usleep_range(IBI_SW_RESET_MIN_SLEEP,
-				IBI_SW_RESET_MAX_SLEEP);
-		}
-
-		if (!wait)
-			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
-				"IBI controller reset failed\n");
-
-		gi3c->ibi.err = 0;
-		reinit_completion(&gi3c->ibi.done);
-
-		/* enable ENABLE_CHANGE */
-		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
-		val |= SW_RESET_DONE_EN;
-		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
-
-		/* wait for SW_RESET_DONE */
-		timeout = wait_for_completion_timeout(&gi3c->ibi.done,
-				XFER_TIMEOUT);
-		if (!timeout) {
-			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
-				"timeout while resetting  IBI controller\n");
-			return;
-		}
-
-		if (gi3c->ibi.err) {
-			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
-				"error while resetting IBI controller 0x%x\n",
-				gi3c->ibi.err);
-			return;
-		}
-
-		/* disable IBI interrupts */
-		geni_write_reg(0, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
 	}
 
 	gi3c->ibi.is_init = false;
-	disable_irq(gi3c->ibi.mngr_irq);
-	disable_irq(gi3c->ibi.gpii_irq[0]);
 }
 
 static void geni_i3c_master_free_ibi(struct i3c_dev_desc *dev)
@@ -1787,6 +1749,15 @@ static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c,
 		return ret;
 	}
 
+	/* set mngr irq as wake-up irq */
+	ret = irq_set_irq_wake(gi3c->ibi.mngr_irq, 1);
+	if (ret) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Failed to set mngr IRQ(%d) wake: err:%d\n",
+			gi3c->ibi.mngr_irq, ret);
+		return ret;
+	}
+
 	/* Register GPII interrupt */
 	gi3c->ibi.gpii_irq[0] = platform_get_irq(pdev, 2);
 	if (gi3c->ibi.gpii_irq[0] < 0) {
@@ -1805,6 +1776,15 @@ static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c,
 		return ret;
 	}
 
+	/* set gpii irq as wake-up irq */
+	ret = irq_set_irq_wake(gi3c->ibi.gpii_irq[0], 1);
+	if (ret) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Failed to set gpii IRQ(%d) wake: err:%d\n",
+			gi3c->ibi.gpii_irq[0], ret);
+		return ret;
+	}
+
 	qcom_geni_i3c_ibi_conf(gi3c);
 
 	return 0;
@@ -1919,6 +1899,11 @@ static int geni_i3c_remove(struct platform_device *pdev)
 	return ret;
 }
 
+static int geni_i3c_resume_noirq(struct device *dev)
+{
+	return 0;
+}
+
 #ifdef CONFIG_PM
 static int geni_i3c_runtime_suspend(struct device *dev)
 {
@@ -1943,6 +1928,18 @@ static int geni_i3c_runtime_resume(struct device *dev)
 	/* Enable TLMM I3C MODE registers */
 	return 0;
 }
+
+static int geni_i3c_suspend_noirq(struct device *dev)
+{
+	if (!pm_runtime_status_suspended(dev)) {
+		geni_i3c_runtime_suspend(dev);
+		pm_runtime_disable(dev);
+		pm_runtime_put_noidle(dev);
+		pm_runtime_set_suspended(dev);
+		pm_runtime_enable(dev);
+	}
+	return 0;
+}
 #else
 static int geni_i3c_runtime_suspend(struct device *dev)
 {
@@ -1953,9 +1950,16 @@ static int geni_i3c_runtime_resume(struct device *dev)
 {
 	return 0;
 }
+
+static int geni_i3c_suspend_noirq(struct device *dev)
+{
+	return 0;
+}
 #endif
 
 static const struct dev_pm_ops geni_i3c_pm_ops = {
+	.suspend_noirq = geni_i3c_suspend_noirq,
+	.resume_noirq = geni_i3c_resume_noirq,
 	.runtime_suspend = geni_i3c_runtime_suspend,
 	.runtime_resume  = geni_i3c_runtime_resume,
 };
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index e1da67d..9e61720 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -814,10 +814,10 @@ static int ad799x_probe(struct i2c_client *client,
 
 	ret = ad799x_write_config(st, st->chip_config->default_config);
 	if (ret < 0)
-		goto error_disable_reg;
+		goto error_disable_vref;
 	ret = ad799x_read_config(st);
 	if (ret < 0)
-		goto error_disable_reg;
+		goto error_disable_vref;
 	st->config = ret;
 
 	ret = iio_triggered_buffer_setup(indio_dev, NULL,
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 4e339cf..e6ce25b 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -16,6 +16,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
@@ -34,6 +35,11 @@
 #define AXP288_ADC_EN_MASK				0xF0
 #define AXP288_ADC_TS_ENABLE				0x01
 
+#define AXP288_ADC_TS_BIAS_MASK				GENMASK(5, 4)
+#define AXP288_ADC_TS_BIAS_20UA				(0 << 4)
+#define AXP288_ADC_TS_BIAS_40UA				(1 << 4)
+#define AXP288_ADC_TS_BIAS_60UA				(2 << 4)
+#define AXP288_ADC_TS_BIAS_80UA				(3 << 4)
 #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK		GENMASK(1, 0)
 #define AXP288_ADC_TS_CURRENT_OFF			(0 << 0)
 #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING		(1 << 0)
@@ -186,10 +192,36 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
 	return ret;
 }
 
+/*
+ * We rely on the machine's firmware to correctly set up the TS pin bias current
+ * at boot. This lists systems with broken fw where we need to set it ourselves.
+ */
+static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
+	{
+		/* Lenovo Ideapad 100S (11 inch) */
+		.matches = {
+		  DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		  DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"),
+		},
+		.driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+	},
+	{}
+};
+
 static int axp288_adc_initialize(struct axp288_adc_info *info)
 {
+	const struct dmi_system_id *bias_override;
 	int ret, adc_enable_val;
 
+	bias_override = dmi_first_match(axp288_adc_ts_bias_override);
+	if (bias_override) {
+		ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+					 AXP288_ADC_TS_BIAS_MASK,
+					 (uintptr_t)bias_override->driver_data);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * Determine if the TS pin is enabled and set the TS current-source
 	 * accordingly.
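
The bias override works as a first-match quirk table: each entry carries DMI match strings plus the bias bits to program in driver_data. A self-contained sketch of that lookup shape, using made-up vendor/product strings rather than real DMI data:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor;
	const char *product;
	uintptr_t driver_data;	/* bias setting to apply; NULL vendor terminates the table */
};

static const struct quirk bias_override[] = {
	{ "ACME", "Example-11", 0x30 },	/* hypothetical board needing a fixed bias */
	{ NULL, NULL, 0 },
};

static const struct quirk *first_match(const char *vendor, const char *product)
{
	const struct quirk *q;

	for (q = bias_override; q->vendor; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			return q;
	return NULL;
}

int main(void)
{
	const struct quirk *q = first_match("ACME", "Example-11");

	if (q)
		printf("override bias bits: 0x%lx\n", (unsigned long)q->driver_data);
	else
		printf("firmware default kept\n");
	return 0;
}
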
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 36b59d8..6c5d81a 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -109,14 +109,14 @@ struct hx711_data {
 
 static int hx711_cycle(struct hx711_data *hx711_data)
 {
-	int val;
+	unsigned long flags;
 
 	/*
 	 * if preempted for more then 60us while PD_SCK is high:
 	 * hx711 is going in reset
 	 * ==> measuring is false
 	 */
-	preempt_disable();
+	local_irq_save(flags);
 	gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
 
 	/*
@@ -126,7 +126,6 @@ static int hx711_cycle(struct hx711_data *hx711_data)
 	 */
 	ndelay(hx711_data->data_ready_delay_ns);
 
-	val = gpiod_get_value(hx711_data->gpiod_dout);
 	/*
 	 * here we are not waiting for 0.2 us as suggested by the datasheet,
 	 * because the oscilloscope showed in a test scenario
@@ -134,7 +133,7 @@ static int hx711_cycle(struct hx711_data *hx711_data)
 	 * and 0.56 us for PD_SCK low on TI Sitara with 800 MHz
 	 */
 	gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
-	preempt_enable();
+	local_irq_restore(flags);
 
 	/*
 	 * make it a square wave for addressing cases with capacitance on
@@ -142,7 +141,8 @@ static int hx711_cycle(struct hx711_data *hx711_data)
 	 */
 	ndelay(hx711_data->data_ready_delay_ns);
 
-	return val;
+	/* sample as late as possible */
+	return gpiod_get_value(hx711_data->gpiod_dout);
 }
 
 static int hx711_read(struct hx711_data *hx711_data)
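
hx711_cycle() now masks interrupts (not just preemption) while PD_SCK is high and samples DOUT at the last possible moment before the next edge. A rough user-space sketch of how 24 such cycles shift a sample in MSB-first; the GPIO helpers here are stubs, not the gpiod API:

#include <stdio.h>

/* Stub GPIO helpers standing in for gpiod_set_value()/gpiod_get_value(). */
static int fake_dout_bits = 0x5A5A5A;	/* pretend conversion result */
static int bit_index = 23;

static void set_pd_sck(int level) { (void)level; }

static int get_dout(void)
{
	int bit = (fake_dout_bits >> bit_index) & 1;

	if (bit_index > 0)
		bit_index--;
	return bit;
}

/* One clock cycle: raise PD_SCK, drop it, then sample DOUT as late as possible. */
static int cycle(void)
{
	set_pd_sck(1);
	/* in the driver this high window is kept short with local_irq_save() */
	set_pd_sck(0);
	return get_dout();
}

int main(void)
{
	int i, value = 0;

	for (i = 0; i < 24; i++)
		value = (value << 1) | cycle();
	printf("raw sample: 0x%06X\n", value);
	return 0;
}
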
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 42f41f8..283b968 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -849,6 +849,11 @@ static int adc_get_dt_channel_data(struct device *dev,
 		prop->avg_samples = VADC_DEF_AVG_SAMPLES;
 	}
 
+	prop->scale_fn_type = -EINVAL;
+	ret = of_property_read_u32(node, "qcom,scale-fn-type", &value);
+	if (!ret && value < SCALE_HW_CALIB_MAX)
+		prop->scale_fn_type = value;
+
 	prop->lut_index = VADC_DEF_LUT_INDEX;
 
 	ret = of_property_read_u32(node, "qcom,lut-index", &value);
@@ -942,8 +947,10 @@ static int adc_get_dt_data(struct adc_chip *adc, struct device_node *node)
 			return ret;
 		}
 
-		prop.scale_fn_type =
-			data->adc_chans[prop.channel].scale_fn_type;
+		if (prop.scale_fn_type == -EINVAL)
+			prop.scale_fn_type =
+				data->adc_chans[prop.channel].scale_fn_type;
+
 		adc->chan_props[index] = prop;
 
 		adc_chan = &data->adc_chans[prop.channel];
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index ebcfbde..114dab7 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -925,6 +925,35 @@ static int qcom_vadc_scale_hw_smb_temp(
 	return 0;
 }
 
+static int qcom_vadc_scale_hw_smb1398_temp(
+				const struct vadc_prescale_ratio *prescale,
+				const struct adc_data *data,
+				u16 adc_code, int *result_mdec)
+{
+	s64 voltage = 0, adc_vdd_ref_mv = 1875;
+	u64 temp;
+
+	if (adc_code > VADC5_MAX_CODE)
+		adc_code = 0;
+
+	/* (ADC code * vref_vadc (1.875V)) / full_scale_code */
+	voltage = (s64) adc_code * adc_vdd_ref_mv * 1000;
+	voltage = div64_s64(voltage, data->full_scale_code_volt);
+	if (voltage > 0) {
+		temp = voltage * prescale->den;
+		temp *= 100;
+		do_div(temp, prescale->num * PMIC5_SMB1398_TEMP_SCALE_FACTOR);
+		voltage = temp;
+	} else {
+		voltage = 0;
+	}
+
+	voltage = voltage - PMIC5_SMB1398_TEMP_CONSTANT;
+	*result_mdec = voltage;
+
+	return 0;
+}
+
 static int qcom_vadc_scale_hw_chg5_temp(
 				const struct vadc_prescale_ratio *prescale,
 				const struct adc_data *data,
@@ -1043,6 +1072,9 @@ int qcom_vadc_hw_scale(enum vadc_scale_fn_type scaletype,
 	case SCALE_HW_CALIB_PM5_SMB_TEMP:
 		return qcom_vadc_scale_hw_smb_temp(prescale, data,
 						adc_code, result);
+	case SCALE_HW_CALIB_PM5_SMB1398_TEMP:
+		return qcom_vadc_scale_hw_smb1398_temp(prescale, data,
+						adc_code, result);
 	default:
 		return -EINVAL;
 	}
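
qcom_vadc_scale_hw_smb1398_temp() converts the raw code to microvolts against the 1.875 V reference, applies the SMB1398 scale factor (340) and the prescale ratio, then subtracts the 268235 offset to give milli-degrees. A stand-alone sketch of that arithmetic; the full-scale code of 0x70e4 and the 1:1 prescale are assumed inputs, not values taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define ADC_VDD_REF_MV        1875
#define SMB1398_SCALE_FACTOR  340
#define SMB1398_TEMP_CONSTANT 268235

/* Assumed full-scale code and 1:1 prescale ratio; both are illustrative inputs. */
#define FULL_SCALE_CODE_VOLT  0x70e4
#define PRESCALE_NUM          1
#define PRESCALE_DEN          1

/* Mirror of the scaling math above: returns temperature in milli-degrees C. */
static int64_t smb1398_code_to_mdegc(uint16_t adc_code)
{
	int64_t voltage_uv, temp;

	voltage_uv = (int64_t)adc_code * ADC_VDD_REF_MV * 1000;
	voltage_uv /= FULL_SCALE_CODE_VOLT;

	if (voltage_uv > 0)
		temp = (voltage_uv * PRESCALE_DEN * 100) /
		       (PRESCALE_NUM * SMB1398_SCALE_FACTOR);
	else
		temp = 0;

	return temp - SMB1398_TEMP_CONSTANT;
}

int main(void)
{
	printf("code 0x2000 -> %lld mC\n",
	       (long long)smb1398_code_to_mdegc(0x2000));
	return 0;
}
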
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
index ccd3751..e77c92f 100644
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ b/drivers/iio/adc/qcom-vadc-common.h
@@ -44,6 +44,8 @@
 #define PMIC5_CHG_TEMP_SCALE_FACTOR		377500
 #define PMIC5_SMB_TEMP_CONSTANT			419400
 #define PMIC5_SMB_TEMP_SCALE_FACTOR		356
+#define PMIC5_SMB1398_TEMP_SCALE_FACTOR	340
+#define PMIC5_SMB1398_TEMP_CONSTANT		268235
 
 #define PMI_CHG_SCALE_1				-138890
 #define PMI_CHG_SCALE_2				391750000000LL
@@ -175,6 +177,8 @@ enum vadc_scale_fn_type {
 	SCALE_HW_CALIB_BATT_THERM_100K,
 	SCALE_HW_CALIB_BATT_THERM_30K,
 	SCALE_HW_CALIB_BATT_THERM_400K,
+	SCALE_HW_CALIB_PM5_SMB1398_TEMP,
+	SCALE_HW_CALIB_MAX,
 };
 
 struct adc_data {
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index ca432e7..38eb966 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -21,45 +21,22 @@
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
-#define STM32F4_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32F4_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x04)
-
-/* STM32F4_ADC_CSR - bit fields */
-#define STM32F4_EOC3			BIT(17)
-#define STM32F4_EOC2			BIT(9)
-#define STM32F4_EOC1			BIT(1)
-
-/* STM32F4_ADC_CCR - bit fields */
-#define STM32F4_ADC_ADCPRE_SHIFT	16
-#define STM32F4_ADC_ADCPRE_MASK		GENMASK(17, 16)
-
-/* STM32H7 - common registers for all ADC instances */
-#define STM32H7_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32H7_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x08)
-
-/* STM32H7_ADC_CSR - bit fields */
-#define STM32H7_EOC_SLV			BIT(18)
-#define STM32H7_EOC_MST			BIT(2)
-
-/* STM32H7_ADC_CCR - bit fields */
-#define STM32H7_PRESC_SHIFT		18
-#define STM32H7_PRESC_MASK		GENMASK(21, 18)
-#define STM32H7_CKMODE_SHIFT		16
-#define STM32H7_CKMODE_MASK		GENMASK(17, 16)
-
 /**
  * stm32_adc_common_regs - stm32 common registers, compatible dependent data
  * @csr:	common status register offset
  * @eoc1:	adc1 end of conversion flag in @csr
  * @eoc2:	adc2 end of conversion flag in @csr
  * @eoc3:	adc3 end of conversion flag in @csr
+ * @ier:	interrupt enable register offset for each adc
+ * @eocie_msk:	end of conversion interrupt enable mask in @ier
  */
 struct stm32_adc_common_regs {
 	u32 csr;
 	u32 eoc1_msk;
 	u32 eoc2_msk;
 	u32 eoc3_msk;
+	u32 ier;
+	u32 eocie_msk;
 };
 
 struct stm32_adc_priv;
@@ -268,6 +245,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
 	.eoc1_msk = STM32F4_EOC1,
 	.eoc2_msk = STM32F4_EOC2,
 	.eoc3_msk = STM32F4_EOC3,
+	.ier = STM32F4_ADC_CR1,
+	.eocie_msk = STM32F4_EOCIE,
 };
 
 /* STM32H7 common registers definitions */
@@ -275,8 +254,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
 	.csr = STM32H7_ADC_CSR,
 	.eoc1_msk = STM32H7_EOC_MST,
 	.eoc2_msk = STM32H7_EOC_SLV,
+	.ier = STM32H7_ADC_IER,
+	.eocie_msk = STM32H7_EOCIE,
 };
 
+static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
+	0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2,
+};
+
+static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv,
+					  unsigned int adc)
+{
+	u32 ier, offset = stm32_adc_offset[adc];
+
+	ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier);
+
+	return ier & priv->cfg->regs->eocie_msk;
+}
+
 /* ADC common interrupt for all instances */
 static void stm32_adc_irq_handler(struct irq_desc *desc)
 {
@@ -287,13 +282,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
 	chained_irq_enter(chip, desc);
 	status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
 
-	if (status & priv->cfg->regs->eoc1_msk)
+	/*
+	 * End of conversion may be handled by using IRQ or DMA. There may be a
+	 * race here when two conversions complete at the same time on several
+	 * ADCs. EOC may be read 'set' for several ADCs, with:
+	 * - an ADC configured to use DMA (EOC triggers the DMA request, and
+	 *   is then automatically cleared by DR read in hardware)
+	 * - an ADC configured to use IRQs (EOCIE bit is set. The handler must
+	 *   be called in this case)
+	 * So both EOC status bit in CSR and EOCIE control bit must be checked
+	 * before invoking the interrupt handler (e.g. call ISR only for
+	 * IRQ-enabled ADCs).
+	 */
+	if (status & priv->cfg->regs->eoc1_msk &&
+	    stm32_adc_eoc_enabled(priv, 0))
 		generic_handle_irq(irq_find_mapping(priv->domain, 0));
 
-	if (status & priv->cfg->regs->eoc2_msk)
+	if (status & priv->cfg->regs->eoc2_msk &&
+	    stm32_adc_eoc_enabled(priv, 1))
 		generic_handle_irq(irq_find_mapping(priv->domain, 1));
 
-	if (status & priv->cfg->regs->eoc3_msk)
+	if (status & priv->cfg->regs->eoc3_msk &&
+	    stm32_adc_eoc_enabled(priv, 2))
 		generic_handle_irq(irq_find_mapping(priv->domain, 2));
 
 	chained_irq_exit(chip, desc);
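
Because an EOC bit in the shared CSR may belong to a DMA-driven instance (where hardware clears it on DR read) or to an IRQ-driven one, the handler now also requires the instance's EOCIE bit before dispatching. A compact sketch of that double check; the CSR masks mirror the STM32F4 bits shown above, while the IER handling is simplified to one value per instance:

#include <stdint.h>
#include <stdio.h>

#define NUM_ADC 3

/* Per-instance EOC status bits in the common status register (as in STM32F4_EOC1/2/3). */
static const uint32_t eoc_msk[NUM_ADC] = { 1u << 1, 1u << 9, 1u << 17 };

static void handle_instance(unsigned int adc)
{
	printf("ADC%u: end-of-conversion handled by IRQ path\n", adc + 1);
}

/* Dispatch only for instances that both completed (CSR) and enabled the IRQ (IER). */
static void adc_irq_handler(uint32_t csr, const uint32_t ier[NUM_ADC], uint32_t eocie)
{
	unsigned int adc;

	for (adc = 0; adc < NUM_ADC; adc++)
		if ((csr & eoc_msk[adc]) && (ier[adc] & eocie))
			handle_instance(adc);
}

int main(void)
{
	/* ADC1 and ADC2 both report EOC, but only ADC2 enabled the interrupt. */
	uint32_t csr = (1u << 1) | (1u << 9);
	uint32_t ier[NUM_ADC] = { 0, 1u << 2, 0 };

	adc_irq_handler(csr, ier, 1u << 2);
	return 0;
}
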
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 8af507b..2579d51 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -25,8 +25,145 @@
  * --------------------------------------------------------
  */
 #define STM32_ADC_MAX_ADCS		3
+#define STM32_ADC_OFFSET		0x100
 #define STM32_ADCX_COMN_OFFSET		0x300
 
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR			0x00
+#define STM32F4_ADC_CR1			0x04
+#define STM32F4_ADC_CR2			0x08
+#define STM32F4_ADC_SMPR1		0x0C
+#define STM32F4_ADC_SMPR2		0x10
+#define STM32F4_ADC_HTR			0x24
+#define STM32F4_ADC_LTR			0x28
+#define STM32F4_ADC_SQR1		0x2C
+#define STM32F4_ADC_SQR2		0x30
+#define STM32F4_ADC_SQR3		0x34
+#define STM32F4_ADC_JSQR		0x38
+#define STM32F4_ADC_JDR1		0x3C
+#define STM32F4_ADC_JDR2		0x40
+#define STM32F4_ADC_JDR3		0x44
+#define STM32F4_ADC_JDR4		0x48
+#define STM32F4_ADC_DR			0x4C
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT			BIT(4)
+#define STM32F4_EOC			BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_RES_SHIFT		24
+#define STM32F4_RES_MASK		GENMASK(25, 24)
+#define STM32F4_SCAN			BIT(8)
+#define STM32F4_EOCIE			BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART			BIT(30)
+#define STM32F4_EXTEN_SHIFT		28
+#define STM32F4_EXTEN_MASK		GENMASK(29, 28)
+#define STM32F4_EXTSEL_SHIFT		24
+#define STM32F4_EXTSEL_MASK		GENMASK(27, 24)
+#define STM32F4_EOCS			BIT(10)
+#define STM32F4_DDS			BIT(9)
+#define STM32F4_DMA			BIT(8)
+#define STM32F4_ADON			BIT(0)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3			BIT(17)
+#define STM32F4_EOC2			BIT(9)
+#define STM32F4_EOC1			BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT	16
+#define STM32F4_ADC_ADCPRE_MASK		GENMASK(17, 16)
+
+/* STM32H7 - Registers for each ADC instance */
+#define STM32H7_ADC_ISR			0x00
+#define STM32H7_ADC_IER			0x04
+#define STM32H7_ADC_CR			0x08
+#define STM32H7_ADC_CFGR		0x0C
+#define STM32H7_ADC_SMPR1		0x14
+#define STM32H7_ADC_SMPR2		0x18
+#define STM32H7_ADC_PCSEL		0x1C
+#define STM32H7_ADC_SQR1		0x30
+#define STM32H7_ADC_SQR2		0x34
+#define STM32H7_ADC_SQR3		0x38
+#define STM32H7_ADC_SQR4		0x3C
+#define STM32H7_ADC_DR			0x40
+#define STM32H7_ADC_DIFSEL		0xC0
+#define STM32H7_ADC_CALFACT		0xC4
+#define STM32H7_ADC_CALFACT2		0xC8
+
+/* STM32H7 - common registers for all ADC instances */
+#define STM32H7_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32H7_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x08)
+
+/* STM32H7_ADC_ISR - bit fields */
+#define STM32MP1_VREGREADY		BIT(12)
+#define STM32H7_EOC			BIT(2)
+#define STM32H7_ADRDY			BIT(0)
+
+/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_EOCIE			STM32H7_EOC
+
+/* STM32H7_ADC_CR - bit fields */
+#define STM32H7_ADCAL			BIT(31)
+#define STM32H7_ADCALDIF		BIT(30)
+#define STM32H7_DEEPPWD			BIT(29)
+#define STM32H7_ADVREGEN		BIT(28)
+#define STM32H7_LINCALRDYW6		BIT(27)
+#define STM32H7_LINCALRDYW5		BIT(26)
+#define STM32H7_LINCALRDYW4		BIT(25)
+#define STM32H7_LINCALRDYW3		BIT(24)
+#define STM32H7_LINCALRDYW2		BIT(23)
+#define STM32H7_LINCALRDYW1		BIT(22)
+#define STM32H7_ADCALLIN		BIT(16)
+#define STM32H7_BOOST			BIT(8)
+#define STM32H7_ADSTP			BIT(4)
+#define STM32H7_ADSTART			BIT(2)
+#define STM32H7_ADDIS			BIT(1)
+#define STM32H7_ADEN			BIT(0)
+
+/* STM32H7_ADC_CFGR bit fields */
+#define STM32H7_EXTEN_SHIFT		10
+#define STM32H7_EXTEN_MASK		GENMASK(11, 10)
+#define STM32H7_EXTSEL_SHIFT		5
+#define STM32H7_EXTSEL_MASK		GENMASK(9, 5)
+#define STM32H7_RES_SHIFT		2
+#define STM32H7_RES_MASK		GENMASK(4, 2)
+#define STM32H7_DMNGT_SHIFT		0
+#define STM32H7_DMNGT_MASK		GENMASK(1, 0)
+
+enum stm32h7_adc_dmngt {
+	STM32H7_DMNGT_DR_ONLY,		/* Regular data in DR only */
+	STM32H7_DMNGT_DMA_ONESHOT,	/* DMA one shot mode */
+	STM32H7_DMNGT_DFSDM,		/* DFSDM mode */
+	STM32H7_DMNGT_DMA_CIRC,		/* DMA circular mode */
+};
+
+/* STM32H7_ADC_CALFACT - bit fields */
+#define STM32H7_CALFACT_D_SHIFT		16
+#define STM32H7_CALFACT_D_MASK		GENMASK(26, 16)
+#define STM32H7_CALFACT_S_SHIFT		0
+#define STM32H7_CALFACT_S_MASK		GENMASK(10, 0)
+
+/* STM32H7_ADC_CALFACT2 - bit fields */
+#define STM32H7_LINCALFACT_SHIFT	0
+#define STM32H7_LINCALFACT_MASK		GENMASK(29, 0)
+
+/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_EOC_SLV			BIT(18)
+#define STM32H7_EOC_MST			BIT(2)
+
+/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_PRESC_SHIFT		18
+#define STM32H7_PRESC_MASK		GENMASK(21, 18)
+#define STM32H7_CKMODE_SHIFT		16
+#define STM32H7_CKMODE_MASK		GENMASK(17, 16)
+
 /**
  * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
  * @base:		control registers base cpu addr
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 3784118..c52d20f 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -27,115 +27,6 @@
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - Registers for each ADC instance */
-#define STM32F4_ADC_SR			0x00
-#define STM32F4_ADC_CR1			0x04
-#define STM32F4_ADC_CR2			0x08
-#define STM32F4_ADC_SMPR1		0x0C
-#define STM32F4_ADC_SMPR2		0x10
-#define STM32F4_ADC_HTR			0x24
-#define STM32F4_ADC_LTR			0x28
-#define STM32F4_ADC_SQR1		0x2C
-#define STM32F4_ADC_SQR2		0x30
-#define STM32F4_ADC_SQR3		0x34
-#define STM32F4_ADC_JSQR		0x38
-#define STM32F4_ADC_JDR1		0x3C
-#define STM32F4_ADC_JDR2		0x40
-#define STM32F4_ADC_JDR3		0x44
-#define STM32F4_ADC_JDR4		0x48
-#define STM32F4_ADC_DR			0x4C
-
-/* STM32F4_ADC_SR - bit fields */
-#define STM32F4_STRT			BIT(4)
-#define STM32F4_EOC			BIT(1)
-
-/* STM32F4_ADC_CR1 - bit fields */
-#define STM32F4_RES_SHIFT		24
-#define STM32F4_RES_MASK		GENMASK(25, 24)
-#define STM32F4_SCAN			BIT(8)
-#define STM32F4_EOCIE			BIT(5)
-
-/* STM32F4_ADC_CR2 - bit fields */
-#define STM32F4_SWSTART			BIT(30)
-#define STM32F4_EXTEN_SHIFT		28
-#define STM32F4_EXTEN_MASK		GENMASK(29, 28)
-#define STM32F4_EXTSEL_SHIFT		24
-#define STM32F4_EXTSEL_MASK		GENMASK(27, 24)
-#define STM32F4_EOCS			BIT(10)
-#define STM32F4_DDS			BIT(9)
-#define STM32F4_DMA			BIT(8)
-#define STM32F4_ADON			BIT(0)
-
-/* STM32H7 - Registers for each ADC instance */
-#define STM32H7_ADC_ISR			0x00
-#define STM32H7_ADC_IER			0x04
-#define STM32H7_ADC_CR			0x08
-#define STM32H7_ADC_CFGR		0x0C
-#define STM32H7_ADC_SMPR1		0x14
-#define STM32H7_ADC_SMPR2		0x18
-#define STM32H7_ADC_PCSEL		0x1C
-#define STM32H7_ADC_SQR1		0x30
-#define STM32H7_ADC_SQR2		0x34
-#define STM32H7_ADC_SQR3		0x38
-#define STM32H7_ADC_SQR4		0x3C
-#define STM32H7_ADC_DR			0x40
-#define STM32H7_ADC_DIFSEL		0xC0
-#define STM32H7_ADC_CALFACT		0xC4
-#define STM32H7_ADC_CALFACT2		0xC8
-
-/* STM32H7_ADC_ISR - bit fields */
-#define STM32MP1_VREGREADY		BIT(12)
-#define STM32H7_EOC			BIT(2)
-#define STM32H7_ADRDY			BIT(0)
-
-/* STM32H7_ADC_IER - bit fields */
-#define STM32H7_EOCIE			STM32H7_EOC
-
-/* STM32H7_ADC_CR - bit fields */
-#define STM32H7_ADCAL			BIT(31)
-#define STM32H7_ADCALDIF		BIT(30)
-#define STM32H7_DEEPPWD			BIT(29)
-#define STM32H7_ADVREGEN		BIT(28)
-#define STM32H7_LINCALRDYW6		BIT(27)
-#define STM32H7_LINCALRDYW5		BIT(26)
-#define STM32H7_LINCALRDYW4		BIT(25)
-#define STM32H7_LINCALRDYW3		BIT(24)
-#define STM32H7_LINCALRDYW2		BIT(23)
-#define STM32H7_LINCALRDYW1		BIT(22)
-#define STM32H7_ADCALLIN		BIT(16)
-#define STM32H7_BOOST			BIT(8)
-#define STM32H7_ADSTP			BIT(4)
-#define STM32H7_ADSTART			BIT(2)
-#define STM32H7_ADDIS			BIT(1)
-#define STM32H7_ADEN			BIT(0)
-
-/* STM32H7_ADC_CFGR bit fields */
-#define STM32H7_EXTEN_SHIFT		10
-#define STM32H7_EXTEN_MASK		GENMASK(11, 10)
-#define STM32H7_EXTSEL_SHIFT		5
-#define STM32H7_EXTSEL_MASK		GENMASK(9, 5)
-#define STM32H7_RES_SHIFT		2
-#define STM32H7_RES_MASK		GENMASK(4, 2)
-#define STM32H7_DMNGT_SHIFT		0
-#define STM32H7_DMNGT_MASK		GENMASK(1, 0)
-
-enum stm32h7_adc_dmngt {
-	STM32H7_DMNGT_DR_ONLY,		/* Regular data in DR only */
-	STM32H7_DMNGT_DMA_ONESHOT,	/* DMA one shot mode */
-	STM32H7_DMNGT_DFSDM,		/* DFSDM mode */
-	STM32H7_DMNGT_DMA_CIRC,		/* DMA circular mode */
-};
-
-/* STM32H7_ADC_CALFACT - bit fields */
-#define STM32H7_CALFACT_D_SHIFT		16
-#define STM32H7_CALFACT_D_MASK		GENMASK(26, 16)
-#define STM32H7_CALFACT_S_SHIFT		0
-#define STM32H7_CALFACT_S_MASK		GENMASK(10, 0)
-
-/* STM32H7_ADC_CALFACT2 - bit fields */
-#define STM32H7_LINCALFACT_SHIFT	0
-#define STM32H7_LINCALFACT_MASK		GENMASK(29, 0)
-
 /* Number of linear calibration shadow registers / LINCALRDYW control bits */
 #define STM32H7_LINCALFACT_NUM		6
 
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 54d88b6..f9d13e4 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -694,6 +694,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
 	struct iio_dev *iio = _iio;
 	struct opt3001 *opt = iio_priv(iio);
 	int ret;
+	bool wake_result_ready_queue = false;
 
 	if (!opt->ok_to_ignore_lock)
 		mutex_lock(&opt->lock);
@@ -728,13 +729,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
 		}
 		opt->result = ret;
 		opt->result_ready = true;
-		wake_up(&opt->result_ready_queue);
+		wake_result_ready_queue = true;
 	}
 
 out:
 	if (!opt->ok_to_ignore_lock)
 		mutex_unlock(&opt->lock);
 
+	if (wake_result_ready_queue)
+		wake_up(&opt->result_ready_queue);
+
 	return IRQ_HANDLED;
 }
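
Deferring wake_up() until after the mutex is released avoids waking a waiter only to have it block straight away on the lock the IRQ handler still holds. A pthread sketch of the same set-flag-under-lock, signal-after-unlock ordering (the kernel wait queue is replaced by a condition variable here, and the names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t result_ready = PTHREAD_COND_INITIALIZER;
static bool ready;
static int result;

static void *producer(void *arg)
{
	bool wake = false;

	(void)arg;
	pthread_mutex_lock(&lock);
	result = 42;
	ready = true;		/* publish the result under the lock */
	wake = true;
	pthread_mutex_unlock(&lock);

	if (wake)		/* signal only after dropping the lock */
		pthread_cond_signal(&result_ready);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);

	pthread_mutex_lock(&lock);
	while (!ready)
		pthread_cond_wait(&result_ready, &lock);
	printf("result=%d\n", result);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}
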
 
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7b76e6f..f2fb731 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -274,13 +274,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
 {
 	int err;
-	struct fw_ri_tpte tpt;
+	struct fw_ri_tpte *tpt;
 	u32 stag_idx;
 	static atomic_t key;
 
 	if (c4iw_fatal_error(rdev))
 		return -EIO;
 
+	tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+	if (!tpt)
+		return -ENOMEM;
+
 	stag_state = stag_state > 0;
 	stag_idx = (*stag) >> 8;
 
@@ -290,6 +294,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 			mutex_lock(&rdev->stats.lock);
 			rdev->stats.stag.fail++;
 			mutex_unlock(&rdev->stats.lock);
+			kfree(tpt);
 			return -ENOMEM;
 		}
 		mutex_lock(&rdev->stats.lock);
@@ -304,28 +309,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 
 	/* write TPT entry */
 	if (reset_tpt_entry)
-		memset(&tpt, 0, sizeof(tpt));
+		memset(tpt, 0, sizeof(*tpt));
 	else {
-		tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+		tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
 			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
 			FW_RI_TPTE_STAGSTATE_V(stag_state) |
 			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
-		tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+		tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
 			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
 			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
 						      FW_RI_VA_BASED_TO))|
 			FW_RI_TPTE_PS_V(page_size));
-		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+		tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
 			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
-		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
-		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
-		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
-		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
-		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+		tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+		tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+		tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+		tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+		tpt->len_hi = cpu_to_be32((u32)(len >> 32));
 	}
 	err = write_adapter_mem(rdev, stag_idx +
 				(rdev->lldi.vr->stag.start >> 5),
-				sizeof(tpt), &tpt, skb, wr_waitp);
+				sizeof(*tpt), tpt, skb, wr_waitp);
 
 	if (reset_tpt_entry) {
 		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -333,6 +338,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 		rdev->stats.stag.cur -= 32;
 		mutex_unlock(&rdev->stats.lock);
 	}
+	kfree(tpt);
 	return err;
 }
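
Moving the TPT entry from the stack to kmalloc() keeps write_tpt_entry()'s stack frame small, at the cost of freeing the buffer on every exit path. A tiny user-space sketch of the shape of that change, with a dummy entry type instead of struct fw_ri_tpte:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Dummy stand-in for the firmware TPT entry; large enough to matter on a stack. */
struct tpt_entry {
	unsigned char raw[512];
};

static int write_entry(int reset)
{
	struct tpt_entry *tpt = malloc(sizeof(*tpt));
	int err = 0;

	if (!tpt)
		return -ENOMEM;

	if (reset)
		memset(tpt, 0, sizeof(*tpt));
	else
		tpt->raw[0] = 0x1;	/* fill in fields, as the driver does */

	/* ... hand *tpt to the hardware/firmware here ... */

	free(tpt);	/* freed on every path once the write is issued */
	return err;
}

int main(void)
{
	printf("write_entry: %d\n", write_entry(0));
	return 0;
}
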
 
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index 3e9c353..a01b25f 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -248,10 +248,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
 	onkey->input->phys = onkey->phys;
 	onkey->input->dev.parent = &pdev->dev;
 
-	if (onkey->key_power)
-		input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
-	input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+	input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
 	INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
 
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 7fb358f..162526a 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -149,7 +149,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 	}
 
 	mutex_lock(&data->irq_mutex);
-	bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+	bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
 	       data->irq_count);
 	/*
 	 * At this point, irq_status has all bits that are set in the
@@ -388,6 +388,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
 	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
 		    data->num_of_irq_regs);
 
+	bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
+
 error_unlock:
 	mutex_unlock(&data->irq_mutex);
 	return error;
@@ -401,6 +403,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
 	struct device *dev = &rmi_dev->dev;
 
 	mutex_lock(&data->irq_mutex);
+	bitmap_andnot(data->fn_irq_bits,
+		      data->fn_irq_bits, mask, data->irq_count);
 	bitmap_andnot(data->new_irq_mask,
 		  data->current_irq_mask, mask, data->irq_count);
 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index b29a832..84ff700 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -541,7 +541,7 @@ static void wake_migration_worker(struct cache *cache)
 
 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
 {
-	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
+	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
 }
 
 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -553,9 +553,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
 {
 	struct dm_cache_migration *mg;
 
-	mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
-	if (!mg)
-		return NULL;
+	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
 
 	memset(mg, 0, sizeof(*mg));
 
@@ -663,10 +661,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
 	struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
 
 	cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
-	if (!cell_prealloc) {
-		defer_bio(cache, bio);
-		return false;
-	}
 
 	build_key(oblock, end, &key);
 	r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1492,11 +1486,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
 	struct dm_bio_prison_cell_v2 *prealloc;
 
 	prealloc = alloc_prison_cell(cache);
-	if (!prealloc) {
-		DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
-		mg_complete(mg, false);
-		return -ENOMEM;
-	}
 
 	/*
 	 * Prevent writes to the block, but allow reads to continue.
@@ -1534,11 +1523,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
 	}
 
 	mg = alloc_migration(cache);
-	if (!mg) {
-		policy_complete_background_work(cache->policy, op, false);
-		background_work_end(cache);
-		return -ENOMEM;
-	}
 
 	mg->op = op;
 	mg->overwrite_bio = bio;
@@ -1627,10 +1611,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
 	struct dm_bio_prison_cell_v2 *prealloc;
 
 	prealloc = alloc_prison_cell(cache);
-	if (!prealloc) {
-		invalidate_complete(mg, false);
-		return -ENOMEM;
-	}
 
 	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
 	r = dm_cell_lock_v2(cache->prison, &key,
@@ -1668,10 +1648,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
 		return -EPERM;
 
 	mg = alloc_migration(cache);
-	if (!mg) {
-		background_work_end(cache);
-		return -ENOMEM;
-	}
 
 	mg->overwrite_bio = bio;
 	mg->invalidate_cblock = cblock;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 43fa7db..3cafbfd 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -158,7 +158,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	} else {
 		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
 		       mdname(mddev));
-		pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
+		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
 		err = -ENOTSUPP;
 		goto abort;
 	}
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index 1f168ea..d90ca95 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -1440,7 +1440,8 @@ static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
 {
 	return (inst->prop.od_cycles ||
 			inst->prop.mpu_cycles ||
-			inst->prop.fdu_cycles);
+			inst->prop.fdu_cycles ||
+			inst->prop.ica_cycles);
 }
 
 static void aggregate_power_update(struct msm_cvp_core *core,
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index 885ea54..ff9a7bd 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -336,5 +336,6 @@ void disable_fw(struct npu_device *npu_dev);
 int load_fw(struct npu_device *npu_dev);
 int unload_fw(struct npu_device *npu_dev);
 int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab);
+int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt);
 
 #endif /* _NPU_COMMON_H */
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index 24359cf..dedb341 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -1319,28 +1319,6 @@ static int npu_exec_network_v2(struct npu_client *client,
 	return ret;
 }
 
-static int npu_process_kevent(struct npu_kevent *kevt)
-{
-	int ret = 0;
-
-	switch (kevt->evt.type) {
-	case MSM_NPU_EVENT_TYPE_EXEC_V2_DONE:
-		ret = copy_to_user((void __user *)kevt->reserved[1],
-			(void *)&kevt->reserved[0],
-			kevt->evt.u.exec_v2_done.stats_buf_size);
-		if (ret) {
-			NPU_ERR("fail to copy to user\n");
-			kevt->evt.u.exec_v2_done.stats_buf_size = 0;
-			ret = -EFAULT;
-		}
-		break;
-	default:
-		break;
-	}
-
-	return ret;
-}
-
 static int npu_receive_event(struct npu_client *client,
 	unsigned long arg)
 {
@@ -1356,7 +1334,7 @@ static int npu_receive_event(struct npu_client *client,
 		kevt = list_first_entry(&client->evt_list,
 			struct npu_kevent, list);
 		list_del(&kevt->list);
-		npu_process_kevent(kevt);
+		npu_process_kevent(client, kevt);
 		ret = copy_to_user(argp, &kevt->evt,
 			sizeof(struct msm_npu_event));
 		if (ret) {
@@ -1472,6 +1450,21 @@ static int npu_get_property(struct npu_client *client,
 	case MSM_NPU_PROP_ID_HARDWARE_VERSION:
 		prop.prop_param[0] = npu_dev->hw_version;
 		break;
+	case MSM_NPU_PROP_ID_IPC_QUEUE_INFO:
+		ret = npu_host_get_ipc_queue_size(npu_dev,
+			prop.prop_param[0]);
+		if (ret < 0) {
+			NPU_ERR("Can't get ipc queue %d size\n",
+				prop.prop_param[0]);
+			return ret;
+		}
+
+		prop.prop_param[1] = ret;
+		break;
+	case MSM_NPU_PROP_ID_DRV_FEATURE:
+		prop.prop_param[0] = MSM_NPU_FEATURE_MULTI_EXECUTE |
+			MSM_NPU_FEATURE_ASYNC_EXECUTE;
+		break;
 	default:
 		ret = npu_host_get_fw_property(client->npu_dev, &prop);
 		if (ret) {
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index b36aa62..573f272 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -411,3 +411,13 @@ int npu_host_ipc_post_init(struct npu_device *npu_dev)
 {
 	return 0;
 }
+
+int npu_host_get_ipc_queue_size(struct npu_device *npu_dev, uint32_t q_idx)
+{
+	if (q_idx >= ARRAY_SIZE(npu_q_setup)) {
+		NPU_ERR("Invalid ipc queue index %d\n", q_idx);
+		return -EINVAL;
+	}
+
+	return npu_q_setup[q_idx].size;
+}
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index b30488e..15af1c9 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -57,7 +57,8 @@ static void host_session_msg_hdlr(struct npu_device *npu_dev);
 static void host_session_log_hdlr(struct npu_device *npu_dev);
 static int host_error_hdlr(struct npu_device *npu_dev, bool force);
 static int npu_send_network_cmd(struct npu_device *npu_dev,
-	struct npu_network *network, void *cmd_ptr, bool async, bool force);
+	struct npu_network *network, void *cmd_ptr,
+	struct npu_network_cmd *cmd);
 static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
 	void *cmd_ptr);
 static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt);
@@ -67,6 +68,16 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 static int load_fw_nolock(struct npu_device *npu_dev, bool enable);
 static void disable_fw_nolock(struct npu_device *npu_dev);
 static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity);
+static void npu_queue_network_cmd(struct npu_network *network,
+	struct npu_network_cmd *cmd);
+static void npu_dequeue_network_cmd(struct npu_network *network,
+	struct npu_network_cmd *cmd);
+static struct npu_network_cmd *npu_find_network_cmd(struct npu_network *network,
+	uint32_t trans_id);
+static struct npu_network_cmd *npu_alloc_network_cmd(struct npu_host_ctx *ctx,
+	uint32_t stats_buf_size);
+static void npu_free_network_cmd(struct npu_host_ctx *ctx,
+	struct npu_network_cmd *cmd);
 
 /* -------------------------------------------------------------------------
  * Function Definitions - Init / Deinit
@@ -603,7 +614,7 @@ int npu_host_update_power(struct npu_device *npu_dev)
 
 int npu_host_init(struct npu_device *npu_dev)
 {
-	int sts = 0;
+	int ret = 0;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
 	memset(host_ctx, 0, sizeof(*host_ctx));
@@ -622,7 +633,7 @@ int npu_host_init(struct npu_device *npu_dev)
 		&host_ctx->nb);
 	if (IS_ERR(host_ctx->notif_hdle)) {
 		NPU_ERR("register event notification failed\n");
-		sts = PTR_ERR(host_ctx->notif_hdle);
+		ret = PTR_ERR(host_ctx->notif_hdle);
 		host_ctx->notif_hdle = NULL;
 		goto fail;
 	}
@@ -631,7 +642,7 @@ int npu_host_init(struct npu_device *npu_dev)
 	host_ctx->wq_pri =
 		alloc_workqueue("npu_ipc_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
 	if (!host_ctx->wq || !host_ctx->wq_pri) {
-		sts = -EPERM;
+		ret = -EPERM;
 		goto fail;
 	} else {
 		INIT_WORK(&host_ctx->ipc_irq_work, npu_ipc_irq_work);
@@ -643,25 +654,46 @@ int npu_host_init(struct npu_device *npu_dev)
 			npu_disable_fw_work);
 	}
 
+	host_ctx->network_cmd_cache = kmem_cache_create("network_cmd_cache",
+		sizeof(struct npu_network_cmd), 0, 0, NULL);
+	if (!host_ctx->network_cmd_cache) {
+		NPU_ERR("Failed to create network_cmd_cache\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	host_ctx->stats_buf_cache = kmem_cache_create_usercopy(
+		"stats_buf_cache", NPU_MAX_STATS_BUF_SIZE, 0, 0,
+		0, NPU_MAX_STATS_BUF_SIZE, NULL);
+	if (!host_ctx->stats_buf_cache) {
+		NPU_ERR("Failed to create stats_buf_cache\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
 	host_ctx->ipc_msg_buf = kzalloc(NPU_IPC_BUF_LENGTH, GFP_KERNEL);
 	if (!host_ctx->ipc_msg_buf) {
 		NPU_ERR("Failed to allocate ipc buffer\n");
-		sts = -ENOMEM;
+		ret = -ENOMEM;
 		goto fail;
 	}
 
 	host_ctx->prop_buf = kzalloc(sizeof(struct msm_npu_property),
 		GFP_KERNEL);
 	if (!host_ctx->prop_buf) {
-		sts = -ENOMEM;
+		NPU_ERR("Failed to allocate prop buffer\n");
+		ret = -ENOMEM;
 		goto fail;
 	}
 
 	host_ctx->auto_pil_disable = false;
 
-	return sts;
+	return 0;
+
 fail:
 	kfree(host_ctx->ipc_msg_buf);
+	kmem_cache_destroy(host_ctx->stats_buf_cache);
+	kmem_cache_destroy(host_ctx->network_cmd_cache);
 	if (host_ctx->wq)
 		destroy_workqueue(host_ctx->wq);
 	if (host_ctx->wq_pri)
@@ -670,7 +702,7 @@ int npu_host_init(struct npu_device *npu_dev)
 		subsys_notif_unregister_notifier(host_ctx->notif_hdle,
 			&host_ctx->nb);
 	mutex_destroy(&host_ctx->lock);
-	return sts;
+	return ret;
 }
 
 void npu_host_deinit(struct npu_device *npu_dev)
@@ -679,6 +711,8 @@ void npu_host_deinit(struct npu_device *npu_dev)
 
 	kfree(host_ctx->prop_buf);
 	kfree(host_ctx->ipc_msg_buf);
+	kmem_cache_destroy(host_ctx->stats_buf_cache);
+	kmem_cache_destroy(host_ctx->network_cmd_cache);
 	destroy_workqueue(host_ctx->wq);
 	destroy_workqueue(host_ctx->wq_pri);
 	subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
@@ -776,6 +810,7 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	struct npu_network *network = NULL;
 	struct npu_kevent kevt;
+	struct npu_network_cmd *cmd;
 	bool fw_alive = true;
 	int i, ret = 0;
 
@@ -809,10 +844,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	}
 
 	NPU_INFO("npu subsystem is restarting\n");
-
-	/* clear FW_CTRL_STATUS register before restart */
-	REGW(npu_dev, REG_NPU_FW_CTRL_STATUS, 0x0);
-
 	reinit_completion(&host_ctx->npu_power_up_done);
 	ret = subsystem_restart_dev(host_ctx->subsystem_handle);
 	if (ret) {
@@ -862,19 +893,28 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	/* flush all pending npu cmds */
 	for (i = 0; i < MAX_LOADED_NETWORK; i++) {
 		network = &host_ctx->networks[i];
-		if (network->is_valid && network->cmd_pending &&
-			network->fw_error) {
-			if (network->cmd_async) {
-				NPU_DBG("async cmd, queue ssr event\n");
-				kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
-				kevt.evt.u.ssr.network_hdl =
-					network->network_hdl;
-				if (npu_queue_event(network->client, &kevt))
-					NPU_ERR("queue npu event failed\n");
-			} else {
-				NPU_DBG("complete network %llx\n",
-					network->id);
-				complete(&network->cmd_done);
+		if (!network->is_valid || !network->fw_error)
+			continue;
+
+		if (network->is_async) {
+			NPU_DBG("async cmd, queue ssr event\n");
+			kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
+			kevt.evt.u.ssr.network_hdl =
+				network->network_hdl;
+			if (npu_queue_event(network->client, &kevt))
+				NPU_ERR("queue npu event failed\n");
+
+			while (!list_empty(&network->cmd_list)) {
+				cmd = list_first_entry(&network->cmd_list,
+					struct npu_network_cmd, list);
+				npu_dequeue_network_cmd(network, cmd);
+				npu_free_network_cmd(host_ctx, cmd);
+			}
+		} else {
+			list_for_each_entry(cmd, &network->cmd_list, list) {
+				NPU_DBG("complete network %llx trans_id %d\n",
+					network->id, cmd->trans_id);
+				complete(&cmd->cmd_done);
 			}
 		}
 	}
@@ -1127,15 +1167,9 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
 
 	memset(network, 0, sizeof(struct npu_network));
 	network->id = i + 1;
-	init_completion(&network->cmd_done);
 	network->is_valid = true;
 	network->client = client;
-	network->stats_buf = kzalloc(NPU_MAX_STATS_BUF_SIZE,
-		GFP_KERNEL);
-	if (!network->stats_buf) {
-		memset(network, 0, sizeof(struct npu_network));
-		return NULL;
-	}
+	INIT_LIST_HEAD(&network->cmd_list);
 
 	ctx->network_num++;
 	return network;
@@ -1198,14 +1232,23 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
 	int64_t id)
 {
 	struct npu_network *network = NULL;
+	struct npu_network_cmd *cmd;
 
 	WARN_ON(!mutex_is_locked(&ctx->lock));
 
 	network = get_network_by_id(ctx, client, id);
 	if (network) {
 		network_put(network);
+		while (!list_empty(&network->cmd_list)) {
+			cmd = list_first_entry(&network->cmd_list,
+				struct npu_network_cmd, list);
+			NPU_WARN("Free cmd %x type %x\n", cmd->cmd_id,
+				cmd->cmd_type);
+			npu_dequeue_network_cmd(network, cmd);
+			npu_free_network_cmd(ctx, cmd);
+		}
+
 		if (atomic_read(&network->ref_cnt) == 0) {
-			kfree(network->stats_buf);
 			memset(network, 0, sizeof(struct npu_network));
 			ctx->network_num--;
 		} else {
@@ -1219,6 +1262,42 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
  * Function Definitions - IPC
  * -------------------------------------------------------------------------
  */
+static struct npu_network_cmd *npu_alloc_network_cmd(struct npu_host_ctx *ctx,
+	uint32_t stats_buf_size)
+{
+	struct npu_network_cmd *cmd = NULL;
+
+	cmd = kmem_cache_zalloc(ctx->network_cmd_cache, GFP_KERNEL);
+	if (!cmd) {
+		NPU_ERR("Can't allocate network cmd\n");
+		return NULL;
+	}
+
+	init_completion(&cmd->cmd_done);
+
+	if (stats_buf_size == 0)
+		return cmd;
+
+	cmd->stats_buf = kmem_cache_zalloc(ctx->stats_buf_cache,
+		GFP_KERNEL);
+	if (!cmd->stats_buf) {
+		NPU_ERR("Can't allocate stats buf\n");
+		kmem_cache_free(ctx->network_cmd_cache, cmd);
+		return NULL;
+	}
+	cmd->stats_buf_size = stats_buf_size;
+
+	return cmd;
+}
+
+static void npu_free_network_cmd(struct npu_host_ctx *ctx,
+	struct npu_network_cmd *cmd)
+{
+	if (cmd->stats_buf)
+		kmem_cache_free(ctx->stats_buf_cache, cmd->stats_buf);
+	kmem_cache_free(ctx->network_cmd_cache, cmd);
+}
+
 static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt)
 {
 	struct npu_kevent *kevt = kmalloc(sizeof(*kevt), GFP_KERNEL);
@@ -1236,12 +1315,96 @@ static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt)
 	return 0;
 }
 
+static void npu_queue_network_cmd(struct npu_network *network,
+	struct npu_network_cmd *cmd)
+{
+	INIT_LIST_HEAD(&cmd->list);
+	list_add_tail(&cmd->list, &network->cmd_list);
+}
+
+static void npu_dequeue_network_cmd(struct npu_network *network,
+	struct npu_network_cmd *cmd)
+{
+	list_del(&cmd->list);
+}
+
+static struct npu_network_cmd *npu_find_network_cmd(struct npu_network *network,
+	uint32_t trans_id)
+{
+	struct npu_network_cmd *cmd;
+
+	list_for_each_entry(cmd, &network->cmd_list, list) {
+		if (cmd->trans_id == trans_id) {
+			NPU_DBG("found cmd for trans_id %d\n", trans_id);
+			return cmd;
+		}
+	}
+
+	NPU_ERR("can't find cmd for trans_id %d\n", trans_id);
+	return NULL;
+}
+
+int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt)
+{
+	struct npu_device *npu_dev = client->npu_dev;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	int ret = 0;
+
+	mutex_lock(&host_ctx->lock);
+
+	switch (kevt->evt.type) {
+	case MSM_NPU_EVENT_TYPE_EXEC_V2_DONE:
+	{
+		struct npu_network_cmd *cmd = NULL;
+		struct npu_network *network;
+
+		network = get_network_by_hdl(host_ctx,
+			client, kevt->reserved[0]);
+		if (!network) {
+			NPU_ERR("Can't find network %x\n", kevt->reserved[0]);
+			ret = -EINVAL;
+			break;
+		}
+
+		cmd = npu_find_network_cmd(network, kevt->reserved[1]);
+		if (!cmd) {
+			NPU_ERR("can't find exec cmd with trans_id:%d\n",
+				kevt->reserved[1]);
+			network_put(network);
+			ret = -EINVAL;
+			break;
+		}
+
+		kevt->evt.reserved[0] = cmd->cmd_id;
+		ret = copy_to_user((void __user *)cmd->stats_buf_u,
+			(void *)cmd->stats_buf,
+			kevt->evt.u.exec_v2_done.stats_buf_size);
+		if (ret) {
+			NPU_ERR("fail to copy to user\n");
+			kevt->evt.u.exec_v2_done.stats_buf_size = 0;
+			ret = -EFAULT;
+		}
+
+		npu_dequeue_network_cmd(network, cmd);
+		npu_free_network_cmd(host_ctx, cmd);
+		network_put(network);
+		break;
+	}
+	default:
+		break;
+	}
+	mutex_unlock(&host_ctx->lock);
+
+	return ret;
+}
+
 static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 {
 	uint32_t msg_id;
 	struct npu_network *network = NULL;
 	struct npu_kevent kevt;
 	struct npu_device *npu_dev = host_ctx->npu_dev;
+	struct npu_network_cmd *network_cmd = NULL;
 
 	msg_id = msg[1];
 	switch (msg_id) {
@@ -1262,19 +1425,19 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			break;
 		}
 
-		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
-			NPU_ERR("execute_pkt trans_id is not match %d:%d\n",
-				network->trans_id,
+		network_cmd = npu_find_network_cmd(network,
+			exe_rsp_pkt->header.trans_id);
+		if (!network_cmd) {
+			NPU_ERR("can't find exec cmd with trans_id:%d\n",
 				exe_rsp_pkt->header.trans_id);
 			network_put(network);
 			break;
 		}
 
-		network->cmd_pending = false;
-		network->cmd_ret_status = exe_rsp_pkt->header.status;
+		network_cmd->ret_status = exe_rsp_pkt->header.status;
 
-		if (!network->cmd_async) {
-			complete(&network->cmd_done);
+		if (!network_cmd->async) {
+			complete(&network_cmd->cmd_done);
 		} else {
 			NPU_DBG("async cmd, queue event\n");
 			kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_DONE;
@@ -1307,10 +1470,12 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			break;
 		}
 
-		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
-			NPU_ERR("execute_pkt_v2 trans_id is not match %d:%d\n",
-				network->trans_id,
-				exe_rsp_pkt->header.trans_id);
+		network_cmd = npu_find_network_cmd(network,
+			exe_rsp_pkt->header.trans_id);
+		if (!network_cmd) {
+			NPU_ERR("can't find exec cmd with trans_id:%d network_hdl:%d\n",
+				exe_rsp_pkt->header.trans_id,
+				exe_rsp_pkt->network_hdl);
 			network_put(network);
 			break;
 		}
@@ -1319,17 +1484,16 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 		stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
 		NPU_DBG("stats_size %d:%d\n", exe_rsp_pkt->header.size,
 			stats_size);
-		stats_size = stats_size < network->stats_buf_size ?
-			stats_size : network->stats_buf_size;
+		stats_size = stats_size < network_cmd->stats_buf_size ?
+			stats_size : network_cmd->stats_buf_size;
 		if (stats_size)
-			memcpy(network->stats_buf, exe_rsp_pkt->stats_data,
+			memcpy(network_cmd->stats_buf, exe_rsp_pkt->stats_data,
 				stats_size);
 
-		network->stats_buf_size = stats_size;
-		network->cmd_pending = false;
-		network->cmd_ret_status = exe_rsp_pkt->header.status;
+		network_cmd->stats_buf_size = stats_size;
+		network_cmd->ret_status = exe_rsp_pkt->header.status;
 
-		if (network->cmd_async) {
+		if (network_cmd->async) {
 			NPU_DBG("async cmd, queue event\n");
 			kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_V2_DONE;
 			kevt.evt.u.exec_v2_done.network_hdl =
@@ -1337,12 +1501,12 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			kevt.evt.u.exec_v2_done.exec_result =
 				exe_rsp_pkt->header.status;
 			kevt.evt.u.exec_v2_done.stats_buf_size = stats_size;
-			kevt.reserved[0] = (uint64_t)network->stats_buf;
-			kevt.reserved[1] = (uint64_t)network->stats_buf_u;
+			kevt.reserved[0] = (uint64_t)network->network_hdl;
+			kevt.reserved[1] = (uint64_t)network_cmd->trans_id;
 			if (npu_queue_event(network->client, &kevt))
 				NPU_ERR("queue npu event failed\n");
 		} else {
-			complete(&network->cmd_done);
+			complete(&network_cmd->cmd_done);
 		}
 		network_put(network);
 		break;
@@ -1369,19 +1533,19 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			break;
 		}
 
-		if (network->trans_id != load_rsp_pkt->header.trans_id) {
-			NPU_ERR("load_rsp_pkt trans_id is not match %d:%d\n",
-				network->trans_id,
+		network_cmd = npu_find_network_cmd(network,
+			load_rsp_pkt->header.trans_id);
+		if (!network_cmd) {
+			NPU_ERR("can't find load cmd with trans_id:%d\n",
 				load_rsp_pkt->header.trans_id);
 			network_put(network);
 			break;
 		}
 
 		network->network_hdl = load_rsp_pkt->network_hdl;
-		network->cmd_pending = false;
-		network->cmd_ret_status = load_rsp_pkt->header.status;
+		network_cmd->ret_status = load_rsp_pkt->header.status;
 
-		complete(&network->cmd_done);
+		complete(&network_cmd->cmd_done);
 		network_put(network);
 		break;
 	}
@@ -1402,18 +1566,18 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			break;
 		}
 
-		if (network->trans_id != unload_rsp_pkt->header.trans_id) {
-			NPU_ERR("unload_rsp_pkt trans_id is not match %d:%d\n",
-				network->trans_id,
+		network_cmd = npu_find_network_cmd(network,
+			unload_rsp_pkt->header.trans_id);
+		if (!network_cmd) {
+			NPU_ERR("can't find unload cmd with trans_id:%d\n",
 				unload_rsp_pkt->header.trans_id);
 			network_put(network);
 			break;
 		}
 
-		network->cmd_pending = false;
-		network->cmd_ret_status = unload_rsp_pkt->header.status;
+		network_cmd->ret_status = unload_rsp_pkt->header.status;
 
-		complete(&network->cmd_done);
+		complete(&network_cmd->cmd_done);
 		network_put(network);
 		break;
 	}
@@ -1615,7 +1779,8 @@ int32_t npu_host_unmap_buf(struct npu_client *client,
 }
 
 static int npu_send_network_cmd(struct npu_device *npu_dev,
-	struct npu_network *network, void *cmd_ptr, bool async, bool force)
+	struct npu_network *network, void *cmd_ptr,
+	struct npu_network_cmd *cmd)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	int ret = 0;
@@ -1626,22 +1791,15 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
 		(host_ctx->fw_state != FW_ENABLED)) {
 		NPU_ERR("fw is in error state or disabled\n");
 		ret = -EIO;
-	} else if (network->cmd_pending && !force) {
-		NPU_ERR("Another cmd is pending\n");
-		ret = -EBUSY;
 	} else {
-		network->cmd_async = async;
-		network->cmd_ret_status = 0;
-		network->cmd_pending = true;
-		network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
-		reinit_completion(&network->cmd_done);
+		if (cmd)
+			reinit_completion(&cmd->cmd_done);
 		NPU_DBG("Send cmd %d network id %llx trans id %d\n",
 			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
-			network->id, network->trans_id);
+			network->id,
+			((struct ipc_cmd_header_pkt *)cmd_ptr)->trans_id);
 		ret = npu_host_ipc_send_cmd(npu_dev,
 			IPC_QUEUE_APPS_EXEC, cmd_ptr);
-		if (ret)
-			network->cmd_pending = false;
 	}
 
 	return ret;
@@ -1842,6 +2000,12 @@ int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
 	struct msm_npu_property *prop_from_fw;
 	uint32_t num_of_params, pkt_size;
 
+	if (property->prop_id < MSM_NPU_FW_PROP_ID_START) {
+		NPU_ERR("Unsupported fw property id %x\n",
+			property->prop_id);
+		return -EINVAL;
+	}
+
 	num_of_params = min_t(uint32_t, property->num_of_params,
 		(uint32_t)PROP_PARAM_MAX_SIZE);
 	pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t);
@@ -1920,6 +2084,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	struct ipc_cmd_load_pkt_v2 *load_packet = NULL;
 	struct ipc_cmd_unload_pkt unload_packet;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct npu_network_cmd *load_cmd = NULL;
 	uint32_t num_patch_params, pkt_size;
 
 	ret = enable_fw(npu_dev);
@@ -1984,16 +2149,30 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	load_packet->buf_pkt.num_layers = network->num_layers;
 	load_packet->num_patch_params = num_patch_params;
 
-	ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
+	load_cmd = npu_alloc_network_cmd(host_ctx, 0);
+	if (!load_cmd) {
+		NPU_ERR("Can't allocate load_cmd\n");
+		ret = -ENOMEM;
+		goto error_free_network;
+	}
+
+	load_cmd->cmd_id = 0;
+	load_cmd->cmd_type = NPU_IPC_CMD_LOAD_V2;
+	load_cmd->trans_id = load_packet->header.trans_id;
+	load_cmd->async = false;
+	npu_queue_network_cmd(network, load_cmd);
+
+	/* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */
+	ret = npu_send_network_cmd(npu_dev, network, load_packet, load_cmd);
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
-		goto error_free_network;
+		goto free_load_cmd;
 	}
 
 	mutex_unlock(&host_ctx->lock);
 
 	ret = wait_for_completion_timeout(
-		&network->cmd_done,
+		&load_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
@@ -2002,24 +2181,28 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during load_v2 network\n");
-		goto error_free_network;
+		goto free_load_cmd;
 	}
 
 	if (!ret) {
 		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
-			network->id, network->trans_id);
+			network->id, load_cmd->trans_id);
 		npu_dump_debug_info(npu_dev);
 		ret = -ETIMEDOUT;
 		goto error_load_network;
 	}
 
-	ret = network->cmd_ret_status;
-	if (ret)
-		goto error_free_network;
+	ret = load_cmd->ret_status;
+	if (ret) {
+		NPU_ERR("load network failed status %d\n", ret);
+		goto free_load_cmd;
+	}
 
 	load_ioctl->network_hdl = network->network_hdl;
 	network->is_active = true;
 	kfree(load_packet);
+	npu_dequeue_network_cmd(network, load_cmd);
+	npu_free_network_cmd(host_ctx, load_cmd);
 	network_put(network);
 	mutex_unlock(&host_ctx->lock);
 
@@ -2034,9 +2217,12 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 		atomic_add_return(1, &host_ctx->ipc_trans_id);
 	unload_packet.header.flags = 0;
 	unload_packet.network_hdl = (uint32_t)network->network_hdl;
-	npu_send_network_cmd(npu_dev, network, &unload_packet, false, true);
+	npu_send_network_cmd(npu_dev, network, &unload_packet, NULL);
 	/* wait 200 ms to make sure fw has processed this command */
 	msleep(200);
+free_load_cmd:
+	npu_dequeue_network_cmd(network, load_cmd);
+	npu_free_network_cmd(host_ctx, load_cmd);
 error_free_network:
 	kfree(load_packet);
 	network_put(network);
@@ -2055,6 +2241,7 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	struct ipc_cmd_unload_pkt unload_packet;
 	struct npu_network *network;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct npu_network_cmd *unload_cmd = NULL;
 
 	/* get the corresponding network for ipc trans id purpose */
 	mutex_lock(&host_ctx->lock);
@@ -2086,8 +2273,22 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	unload_packet.header.flags = 0;
 	unload_packet.network_hdl = (uint32_t)network->network_hdl;
 
-	ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false,
-		false);
+	unload_cmd = npu_alloc_network_cmd(host_ctx, 0);
+	if (!unload_cmd) {
+		NPU_ERR("Can't allocate unload_cmd\n");
+		ret = -ENOMEM;
+		goto free_network;
+	}
+
+	unload_cmd->cmd_id = 0;
+	unload_cmd->cmd_type = NPU_IPC_CMD_UNLOAD;
+	unload_cmd->trans_id = unload_packet.header.trans_id;
+	unload_cmd->async = false;
+	npu_queue_network_cmd(network, unload_cmd);
+
+	/* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */
+	ret = npu_send_network_cmd(npu_dev, network, &unload_packet,
+		unload_cmd);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
@@ -2097,17 +2298,19 @@ int32_t npu_host_unload_network(struct npu_client *client,
 		 */
 		if (ret == -EBUSY) {
 			NPU_ERR("Network is running, retry later\n");
+			npu_dequeue_network_cmd(network, unload_cmd);
+			npu_free_network_cmd(host_ctx, unload_cmd);
 			network_put(network);
 			mutex_unlock(&host_ctx->lock);
 			return ret;
 		}
-		goto free_network;
+		goto free_unload_cmd;
 	}
 
 	mutex_unlock(&host_ctx->lock);
 
 	ret = wait_for_completion_timeout(
-		&network->cmd_done,
+		&unload_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
@@ -2121,16 +2324,18 @@ int32_t npu_host_unload_network(struct npu_client *client,
 
 	if (!ret) {
 		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
-			network->id, network->trans_id);
+			network->id, unload_cmd->trans_id);
 		npu_dump_debug_info(npu_dev);
-		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
-		goto free_network;
+		goto free_unload_cmd;
 	}
 
-	ret = network->cmd_ret_status;
+	ret = unload_cmd->ret_status;
 	NPU_DBG("unload network status %d\n", ret);
 
+free_unload_cmd:
+	npu_dequeue_network_cmd(network, unload_cmd);
+	npu_free_network_cmd(host_ctx, unload_cmd);
 free_network:
 	/*
 	 * free the network on the kernel if the corresponding ACO
@@ -2156,6 +2361,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 {
 	struct npu_device *npu_dev = client->npu_dev;
 	struct ipc_cmd_execute_pkt_v2 *exec_packet;
+	struct npu_network_cmd *exec_cmd = NULL;
 	int32_t ret;
 	struct npu_network *network;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
@@ -2184,6 +2390,14 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 		goto exec_v2_done;
 	}
 
+	if (network->is_async && !async_ioctl) {
+		NPU_ERR("network is in async mode\n");
+		ret = -EINVAL;
+		goto exec_v2_done;
+	}
+
+	network->is_async = async_ioctl;
+
 	NPU_DBG("execute_v2 network %lld\n", network->id);
 	num_patch_params = exec_ioctl->patch_buf_info_num;
 	pkt_size = num_patch_params * sizeof(struct npu_patch_params_v2) +
@@ -2222,18 +2436,28 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	exec_packet->network_hdl = network->network_hdl;
 	exec_packet->num_patch_params = num_patch_params;
 
-	network->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
-	network->stats_buf_size = exec_ioctl->stats_buf_size;
+	exec_cmd = npu_alloc_network_cmd(host_ctx, exec_ioctl->stats_buf_size);
+	if (!exec_cmd) {
+		NPU_ERR("Can't allocate exec_cmd\n");
+		ret = -ENOMEM;
+		goto free_exec_packet;
+	}
+
+	exec_cmd->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
+	exec_cmd->cmd_id = exec_ioctl->async;
+	exec_cmd->cmd_type = NPU_IPC_CMD_EXECUTE_V2;
+	exec_cmd->trans_id = exec_packet->header.trans_id;
+	exec_cmd->async = async_ioctl;
+	npu_queue_network_cmd(network, exec_cmd);
 
 	NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
 		exec_packet->header.flags, exec_ioctl->stats_buf_size);
 
-	ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl,
-		false);
+	ret = npu_send_network_cmd(npu_dev, network, exec_packet, exec_cmd);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
-		goto free_exec_packet;
+		goto free_exec_cmd;
 	}
 
 	if (async_ioctl) {
@@ -2244,7 +2468,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	mutex_unlock(&host_ctx->lock);
 
 	ret = wait_for_completion_timeout(
-		&network->cmd_done,
+		&exec_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
@@ -2252,32 +2476,35 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during execute_v2 network\n");
-		goto free_exec_packet;
+		goto free_exec_cmd;
 	}
 
 	if (!ret) {
 		NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
-			network->id, network->trans_id);
+			network->id, exec_cmd->trans_id);
 		npu_dump_debug_info(npu_dev);
-		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_exec_packet;
 	}
 
-	ret = network->cmd_ret_status;
-	if (!ret) {
-		exec_ioctl->stats_buf_size = network->stats_buf_size;
-		if (copy_to_user(
-			(void __user *)exec_ioctl->stats_buf_addr,
-			network->stats_buf,
-			exec_ioctl->stats_buf_size)) {
-			NPU_ERR("copy stats to user failed\n");
-			exec_ioctl->stats_buf_size = 0;
-		}
-	} else {
+	ret = exec_cmd->ret_status;
+	if (ret) {
 		NPU_ERR("execution failed %d\n", ret);
+		goto free_exec_cmd;
 	}
 
+	exec_ioctl->stats_buf_size = exec_cmd->stats_buf_size;
+	if (copy_to_user(
+		(void __user *)exec_ioctl->stats_buf_addr,
+		exec_cmd->stats_buf,
+		exec_ioctl->stats_buf_size)) {
+		NPU_ERR("copy stats to user failed\n");
+		exec_ioctl->stats_buf_size = 0;
+	}
+
+free_exec_cmd:
+	npu_dequeue_network_cmd(network, exec_cmd);
+	npu_free_network_cmd(host_ctx, exec_cmd);
 free_exec_packet:
 	kfree(exec_packet);
 exec_v2_done:
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 9ef7883..44bb7c9 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -36,6 +36,21 @@
  * Data Structures
  * -------------------------------------------------------------------------
  */
+
+struct npu_network_cmd {
+	struct list_head list;
+	uint32_t cmd_type;
+	uint32_t cmd_id;
+	uint32_t trans_id;
+	bool async;
+	struct completion cmd_done;
+	/* stats buf info */
+	uint32_t stats_buf_size;
+	void __user *stats_buf_u;
+	void *stats_buf;
+	int ret_status;
+};
+
 struct npu_network {
 	uint64_t id;
 	int buf_hdl;
@@ -47,19 +62,13 @@ struct npu_network {
 	uint32_t cur_perf_mode;
 	uint32_t init_perf_mode;
 	uint32_t num_layers;
-	void *stats_buf;
-	void __user *stats_buf_u;
-	uint32_t stats_buf_size;
-	uint32_t trans_id;
 	atomic_t ref_cnt;
 	bool is_valid;
 	bool is_active;
 	bool fw_error;
-	bool cmd_pending;
-	bool cmd_async;
-	int cmd_ret_status;
-	struct completion cmd_done;
+	bool is_async;
 	struct npu_client *client;
+	struct list_head cmd_list;
 };
 
 enum fw_state {
@@ -92,6 +101,8 @@ struct npu_host_ctx {
 	void *prop_buf;
 	int32_t network_num;
 	struct npu_network networks[MAX_LOADED_NETWORK];
+	struct kmem_cache *network_cmd_cache;
+	struct kmem_cache *stats_buf_cache;
 	bool sys_cache_disable;
 	bool auto_pil_disable;
 	uint32_t fw_dbg_mode;
@@ -129,6 +140,7 @@ int npu_host_ipc_send_cmd(struct npu_device *npu_dev, uint32_t queueIndex,
 	void *pCmd);
 int npu_host_ipc_read_msg(struct npu_device *npu_dev, uint32_t queueIndex,
 	uint32_t *pMsg);
+int npu_host_get_ipc_queue_size(struct npu_device *npu_dev, uint32_t q_idx);
 
 int32_t npu_host_get_info(struct npu_device *npu_dev,
 	struct msm_npu_get_info_ioctl *get_info_ioctl);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 5accb52..6e3f234 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -641,8 +641,7 @@ static int v4l_stk_release(struct file *fp)
 		dev->owner = NULL;
 	}
 
-	if (is_present(dev))
-		usb_autopm_put_interface(dev->interface);
+	usb_autopm_put_interface(dev->interface);
 	mutex_unlock(&dev->lock);
 	return v4l2_fh_release(fp);
 }
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index bcdca9f..29f5021 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -949,7 +949,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
 	if (!cnt) {
 		rc = -ENODEV;
 		pci_dev_busy = 1;
-		goto err_out;
+		goto err_out_int;
 	}
 
 	jm = kzalloc(sizeof(struct jmb38x_ms)
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index a6f41f9..198e030 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -214,13 +214,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
 {
 	int ret;
 
+	/* No need to enable the client if nothing is needed from it */
+	if (!cldev->bus->fw_f_fw_ver_supported &&
+	    !cldev->bus->hbm_f_os_supported)
+		return;
+
 	ret = mei_cldev_enable(cldev);
 	if (ret)
 		return;
 
-	ret = mei_fwver(cldev);
-	if (ret < 0)
-		dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+	if (cldev->bus->fw_f_fw_ver_supported) {
+		ret = mei_fwver(cldev);
+		if (ret < 0)
+			dev_err(&cldev->dev, "FW version command failed %d\n",
+				ret);
+	}
 
 	if (cldev->bus->hbm_f_os_supported) {
 		ret = mei_osver(cldev);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index cdd7af1..f85aa3f 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -139,6 +139,9 @@
 #define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
 #define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
 
+#define MEI_DEV_ID_CMP_LP     0x02e0  /* Comet Point LP */
+#define MEI_DEV_ID_CMP_LP_3   0x02e4  /* Comet Point LP 3 (iTouch) */
+
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
 
 #define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 0759c3a..60c8c84 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1368,6 +1368,8 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
 #define MEI_CFG_FW_SPS                           \
 	.quirk_probe = mei_me_fw_type_sps
 
+#define MEI_CFG_FW_VER_SUPP                     \
+	.fw_ver_supported = 1
 
 #define MEI_CFG_ICH_HFS                      \
 	.fw_status.count = 0
@@ -1405,31 +1407,41 @@ static const struct mei_cfg mei_me_ich10_cfg = {
 	MEI_CFG_ICH10_HFS,
 };
 
-/* PCH devices */
-static const struct mei_cfg mei_me_pch_cfg = {
+/* PCH6 devices */
+static const struct mei_cfg mei_me_pch6_cfg = {
 	MEI_CFG_PCH_HFS,
 };
 
+/* PCH7 devices */
+static const struct mei_cfg mei_me_pch7_cfg = {
+	MEI_CFG_PCH_HFS,
+	MEI_CFG_FW_VER_SUPP,
+};
+
 /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
 static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
 	MEI_CFG_PCH_HFS,
+	MEI_CFG_FW_VER_SUPP,
 	MEI_CFG_FW_NM,
 };
 
 /* PCH8 Lynx Point and newer devices */
 static const struct mei_cfg mei_me_pch8_cfg = {
 	MEI_CFG_PCH8_HFS,
+	MEI_CFG_FW_VER_SUPP,
 };
 
 /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
 static const struct mei_cfg mei_me_pch8_sps_cfg = {
 	MEI_CFG_PCH8_HFS,
+	MEI_CFG_FW_VER_SUPP,
 	MEI_CFG_FW_SPS,
 };
 
 /* Cannon Lake and newer devices */
 static const struct mei_cfg mei_me_pch12_cfg = {
 	MEI_CFG_PCH8_HFS,
+	MEI_CFG_FW_VER_SUPP,
 	MEI_CFG_DMA_128,
 };
 
@@ -1441,7 +1453,8 @@ static const struct mei_cfg *const mei_cfg_list[] = {
 	[MEI_ME_UNDEF_CFG] = NULL,
 	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
 	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
-	[MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
+	[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
+	[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
 	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
 	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
 	[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
@@ -1480,6 +1493,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
 
 	mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
 	hw->cfg = cfg;
+	dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+
 	return dev;
 }
 
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index bbcc5fc..7759713 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -32,11 +32,13 @@
  * @fw_status: FW status
  * @quirk_probe: device exclusion quirk
  * @dma_size: device DMA buffers size
+ * @fw_ver_supported: is fw version retrievable from FW
  */
 struct mei_cfg {
 	const struct mei_fw_status fw_status;
 	bool (*quirk_probe)(struct pci_dev *pdev);
 	size_t dma_size[DMA_DSCR_NUM];
+	u32 fw_ver_supported:1;
 };
 
 
@@ -74,7 +76,8 @@ struct mei_me_hw {
  * @MEI_ME_UNDEF_CFG:      Lower sentinel.
  * @MEI_ME_ICH_CFG:        I/O Controller Hub legacy devices.
  * @MEI_ME_ICH10_CFG:      I/O Controller Hub platforms Gen10
- * @MEI_ME_PCH_CFG:        Platform Controller Hub platforms (Up to Gen8).
+ * @MEI_ME_PCH6_CFG:       Platform Controller Hub platforms (Gen6).
+ * @MEI_ME_PCH7_CFG:       Platform Controller Hub platforms (Gen7).
  * @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations
  *                         with quirk for Node Manager exclusion.
  * @MEI_ME_PCH8_CFG:       Platform Controller Hub Gen8 and newer
@@ -89,7 +92,8 @@ enum mei_cfg_idx {
 	MEI_ME_UNDEF_CFG,
 	MEI_ME_ICH_CFG,
 	MEI_ME_ICH10_CFG,
-	MEI_ME_PCH_CFG,
+	MEI_ME_PCH6_CFG,
+	MEI_ME_PCH7_CFG,
 	MEI_ME_PCH_CPT_PBG_CFG,
 	MEI_ME_PCH8_CFG,
 	MEI_ME_PCH8_SPS_CFG,
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 377397e..fc7a5e3 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -422,6 +422,8 @@ struct mei_fw_version {
  *
  * @fw_ver : FW versions
  *
+ * @fw_f_fw_ver_supported : fw feature: fw version supported
+ *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
  * @me_clients_map : FW clients bit map
@@ -500,6 +502,8 @@ struct mei_device {
 
 	struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
 
+	unsigned int fw_f_fw_ver_supported:1;
+
 	struct rw_semaphore me_clients_rwsem;
 	struct list_head me_clients;
 	DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index e41f9e0..28cdd87 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -70,13 +70,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
 
-	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
@@ -105,6 +105,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 667c06d..7c623c1 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -3029,19 +3029,18 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
 				bool app_crash)
 {
 	unsigned long flags;
-	unsigned long flags1;
 	int ret = 0;
 	struct qseecom_registered_app_list *ptr_app = NULL;
-	bool unload = false;
 	bool found_app = false;
-	bool found_dead_app = false;
-	bool doublecheck = false;
 
 	if (!data) {
 		pr_err("Invalid/uninitialized device handle\n");
 		return -EINVAL;
 	}
 
+	pr_debug("unload app %d(%s), app_crash flag %d\n", data->client.app_id,
+			data->client.app_name, app_crash);
+
 	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
 		pr_debug("Do not unload keymaster app from tz\n");
 		goto unload_exit;
@@ -3050,88 +3049,42 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
 	__qseecom_cleanup_app(data);
 	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
 
-	if (data->client.app_id > 0) {
-		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
-		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
-									list) {
-			if (ptr_app->app_id == data->client.app_id) {
-				if (!strcmp((void *)ptr_app->app_name,
-					(void *)data->client.app_name)) {
-					found_app = true;
-					if (ptr_app->app_blocked ||
-							ptr_app->check_block)
-						app_crash = false;
-					if (app_crash || ptr_app->ref_cnt == 1)
-						unload = true;
-					break;
-				}
-				found_dead_app = true;
-				break;
-			}
-		}
-		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
-								flags);
-		if (!found_app && !found_dead_app) {
-			pr_err("Cannot find app with id = %d (%s)\n",
-				data->client.app_id,
-				(char *)data->client.app_name);
-			ret = -EINVAL;
-			goto unload_exit;
+	/* ignore app_id 0; it happens when closing qseecom_fd after a failed app load */
+	if (!data->client.app_id)
+		goto unload_exit;
+
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+								list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			(!strcmp(ptr_app->app_name, data->client.app_name))) {
+			pr_debug("unload app %d (%s), ref_cnt %d\n",
+				ptr_app->app_id, ptr_app->app_name,
+				ptr_app->ref_cnt);
+			ptr_app->ref_cnt--;
+			found_app = true;
+			break;
 		}
 	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+							flags);
+	if (!found_app) {
+		pr_err("Cannot find app with id = %d (%s)\n",
+			data->client.app_id, data->client.app_name);
+		ret = -EINVAL;
+		goto unload_exit;
+	}
 
-	if (found_dead_app)
-		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
-			(char *)data->client.app_name);
-
-	if (unload) {
+	if (!ptr_app->ref_cnt) {
 		ret = __qseecom_unload_app(data, data->client.app_id);
-
-		/* double check if this app_entry still exists */
-		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
-		list_for_each_entry(ptr_app,
-			&qseecom.registered_app_list_head, list) {
-			if ((ptr_app->app_id == data->client.app_id) &&
-				(!strcmp((void *)ptr_app->app_name,
-				(void *)data->client.app_name))) {
-				doublecheck = true;
-				break;
-			}
-		}
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_del(&ptr_app->list);
 		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
-								flags1);
-		if (!doublecheck) {
-			pr_warn("app %d(%s) entry is already removed\n",
-				data->client.app_id,
-				(char *)data->client.app_name);
-			found_app = false;
-		}
+					flags);
+		kzfree(ptr_app);
 	}
 
 unload_exit:
-	if (found_app) {
-		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
-		if (app_crash) {
-			ptr_app->ref_cnt = 0;
-			pr_debug("app_crash: ref_count = 0\n");
-		} else {
-			if (ptr_app->ref_cnt == 1) {
-				ptr_app->ref_cnt = 0;
-				pr_debug("ref_count set to 0\n");
-			} else {
-				ptr_app->ref_cnt--;
-				pr_debug("Can't unload app(%d) inuse\n",
-					ptr_app->app_id);
-			}
-		}
-		if (unload) {
-			list_del(&ptr_app->list);
-			kzfree(ptr_app);
-		}
-		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
-								flags1);
-	}
-
 	if (data->client.dmabuf)
 		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
 			data->client.attach, data->client.dmabuf);
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 2d13f08..ad3ffe7 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -721,6 +721,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 		BUG();
 	}
 	mmc_log_string(mmc, "tag: %d\n", tag);
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
 	/* Commit the doorbell write immediately */
 	wmb();
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index e5c598ae..6627523 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -480,7 +480,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
 
 	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
-	value |= ESDHC_DMA_SNOOP;
+
+	if (of_dma_is_coherent(dev->of_node))
+		value |= ESDHC_DMA_SNOOP;
+	else
+		value &= ~ESDHC_DMA_SNOOP;
+
 	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
 	return 0;
 }
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 4025484..7fd1626 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3294,6 +3294,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
 static void sdhci_adma_show_error(struct sdhci_host *host)
 {
 	void *desc = host->adma_table;
+	dma_addr_t dma = host->adma_addr;
 
 	sdhci_dumpregs(host);
 
@@ -3301,18 +3302,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
 		struct sdhci_adma2_64_desc *dma_desc = desc;
 
 		if (host->flags & SDHCI_USE_64_BIT_DMA)
-			DBG("%pK: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
-			    desc, le32_to_cpu(dma_desc->addr_hi),
+			SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			    (unsigned long long)dma,
+			    le32_to_cpu(dma_desc->addr_hi),
 			    le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 		else
-			DBG("%pK: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
-			    desc, le32_to_cpu(dma_desc->addr_lo),
+			SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			    (unsigned long long)dma,
+			    le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 
 		desc += host->desc_sz;
+		dma += host->desc_sz;
 
 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
 			break;
@@ -3409,7 +3413,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 		host->mmc->err_stats[MMC_ERR_DAT_CRC]++;
 	}
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
-		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+		pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
+		       intmask);
 		sdhci_adma_show_error(host);
 		host->mmc->err_stats[MMC_ERR_ADMA]++;
 		host->data->error = -EIO;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index fccb6bf..de8d9dc 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -626,7 +626,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
 static int mcp251x_hw_reset(struct spi_device *spi)
 {
 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
-	u8 reg;
+	unsigned long timeout;
 	int ret;
 
 	/* Wait for oscillator startup timer after power up */
@@ -640,10 +640,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
 	/* Wait for oscillator startup timer after reset */
 	mdelay(MCP251X_OST_DELAY_MS);
 
-	reg = mcp251x_read_reg(spi, CANSTAT);
-	if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
-		return -ENODEV;
+	/* Wait for reset to finish */
+	timeout = jiffies + HZ;
+	while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+	       CANCTRL_REQOP_CONF) {
+		usleep_range(MCP251X_OST_DELAY_MS * 1000,
+			     MCP251X_OST_DELAY_MS * 1000 * 2);
 
+		if (time_after(jiffies, timeout)) {
+			dev_err(&spi->dev,
+				"MCP251x didn't enter conf mode after reset\n");
+			return -EBUSY;
+		}
+	}
 	return 0;
 }
 
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index bdd8f2d..33232cc 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -543,7 +543,7 @@ qca8k_setup(struct dsa_switch *ds)
 		    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
 
 	/* Setup connection between CPU port & user ports */
-	for (i = 0; i < DSA_MAX_PORTS; i++) {
+	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
 		/* CPU port gets connected to all user ports of the switch */
 		if (dsa_is_cpu_port(ds, i)) {
 			qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
@@ -897,7 +897,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
 	if (id != QCA8K_ID_QCA8337)
 		return -ENODEV;
 
-	priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+	priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
 	if (!priv->ds)
 		return -ENOMEM;
 
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index a4d5049..f4b14b6 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 	irq = of_irq_get(intc, 0);
 	if (irq <= 0) {
 		dev_err(smi->dev, "failed to get parent IRQ\n");
-		return irq ? irq : -EINVAL;
+		ret = irq ? irq : -EINVAL;
+		goto out_put_node;
 	}
 
 	/* This clears the IRQ status register */
@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 			  &val);
 	if (ret) {
 		dev_err(smi->dev, "can't read interrupt status\n");
-		return ret;
+		goto out_put_node;
 	}
 
 	/* Fetch IRQ edge information from the descriptor */
@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 				 val);
 	if (ret) {
 		dev_err(smi->dev, "could not configure IRQ polarity\n");
-		return ret;
+		goto out_put_node;
 	}
 
 	ret = devm_request_threaded_irq(smi->dev, irq, NULL,
@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 					"RTL8366RB", smi);
 	if (ret) {
 		dev_err(smi->dev, "unable to request irq: %d\n", ret);
-		return ret;
+		goto out_put_node;
 	}
 	smi->irqdomain = irq_domain_add_linear(intc,
 					       RTL8366RB_NUM_INTERRUPT,
@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 					       smi);
 	if (!smi->irqdomain) {
 		dev_err(smi->dev, "failed to create IRQ domain\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_put_node;
 	}
 	for (i = 0; i < smi->num_ports; i++)
 		irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
 
-	return 0;
+out_put_node:
+	of_node_put(intc);
+	return ret;
 }
 
 static int rtl8366rb_set_addr(struct realtek_smi *smi)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 14b49612a..4dabf37 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -369,6 +369,7 @@ struct bcmgenet_mib_counters {
 #define  EXT_PWR_DOWN_PHY_EN		(1 << 20)
 
 #define EXT_RGMII_OOB_CTRL		0x0C
+#define  RGMII_MODE_EN_V123		(1 << 0)
 #define  RGMII_LINK			(1 << 4)
 #define  OOB_DISABLE			(1 << 5)
 #define  RGMII_MODE_EN			(1 << 6)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index de0e24d..0d527fa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -261,7 +261,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 	 */
 	if (priv->ext_phy) {
 		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-		reg |= RGMII_MODE_EN | id_mode_dis;
+		reg |= id_mode_dis;
+		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+			reg |= RGMII_MODE_EN_V123;
+		else
+			reg |= RGMII_MODE_EN;
 		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 	}
 
@@ -276,11 +280,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device_node *dn = priv->pdev->dev.of_node;
 	struct phy_device *phydev;
-	u32 phy_flags;
+	u32 phy_flags = 0;
 	int ret;
 
 	/* Communicate the integrated PHY revision */
-	phy_flags = priv->gphy_rev;
+	if (priv->internal_phy)
+		phy_flags = priv->gphy_rev;
 
 	/* Initialize link state variables that bcmgenet_mii_setup() uses */
 	priv->old_link = -1;
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc2..9a3bc09 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -156,11 +156,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
 {
 	u32 time_cnt;
 	u32 reg_value;
+	int ret;
 
 	regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
 
 	for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
-		regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+		ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+		if (ret)
+			return ret;
+
 		reg_value &= st_msk;
 		if ((!!check_st) == (!!reg_value))
 			break;
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index b69c622..6f0e401 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -96,6 +96,8 @@
 
 #define OPT_SWAP_PORT	0x0001	/* Need to wordswp on the MPU port */
 
+#define LIB82596_DMA_ATTR	DMA_ATTR_NON_CONSISTENT
+
 #define DMA_WBACK(ndev, addr, len) \
 	do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
@@ -199,7 +201,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
 
 	unregister_netdev (dev);
 	dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
-		       lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+		       lp->dma_addr, LIB82596_DMA_ATTR);
 	free_netdev (dev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 2f7ae11..d0e8193 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
 
 	dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
 			      &lp->dma_addr, GFP_KERNEL,
-			      DMA_ATTR_NON_CONSISTENT);
+			      LIB82596_DMA_ATTR);
 	if (!dma) {
 		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
 		return -ENOMEM;
@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
 	i = register_netdev(dev);
 	if (i) {
 		dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
-			       dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+			       dma, lp->dma_addr, LIB82596_DMA_ATTR);
 		return i;
 	}
 
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index b2c04a7..43c1fd1 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -23,6 +23,8 @@
 
 static const char sni_82596_string[] = "snirm_82596";
 
+#define LIB82596_DMA_ATTR	0
+
 #define DMA_WBACK(priv, addr, len)     do { } while (0)
 #define DMA_INV(priv, addr, len)       do { } while (0)
 #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
@@ -151,7 +153,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
 
 	unregister_netdev(dev);
 	dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
-		       lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+		       lp->dma_addr, LIB82596_DMA_ATTR);
 	iounmap(lp->ca);
 	iounmap(lp->mpu_port);
 	free_netdev (dev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index aa067a7a..8fa1473 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2731,12 +2731,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 
 	if (adapter->resetting &&
 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
-		u64 val = (0xff000000) | scrq->hw_irq;
+		struct irq_desc *desc = irq_to_desc(scrq->irq);
+		struct irq_chip *chip = irq_desc_get_chip(desc);
 
-		rc = plpar_hcall_norets(H_EOI, val);
-		if (rc)
-			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
-				val, rc);
+		chip->irq_eoi(&desc->irq_data);
 	}
 
 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 22c572a..c19e88e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -272,6 +272,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 		port = nfp_port_alloc(app, port_type, repr);
 		if (IS_ERR(port)) {
 			err = PTR_ERR(port);
+			kfree(repr_priv);
 			nfp_repr_free(repr);
 			goto err_reprs_clean;
 		}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0101eba..014fe93 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4522,8 +4522,10 @@ int stmmac_suspend(struct device *dev)
 		stmmac_mac_set(priv, priv->ioaddr, false);
 		pinctrl_pm_select_sleep_state(priv->device);
 		/* Disable clock in case of PWM is off */
-		clk_disable(priv->plat->pclk);
-		clk_disable(priv->plat->stmmac_clk);
+		if (priv->plat->clk_ptp_ref)
+			clk_disable_unprepare(priv->plat->clk_ptp_ref);
+		clk_disable_unprepare(priv->plat->pclk);
+		clk_disable_unprepare(priv->plat->stmmac_clk);
 	}
 	mutex_unlock(&priv->lock);
 
@@ -4588,8 +4590,10 @@ int stmmac_resume(struct device *dev)
 	} else {
 		pinctrl_pm_select_default_state(priv->device);
 		/* enable the clk previously disabled */
-		clk_enable(priv->plat->stmmac_clk);
-		clk_enable(priv->plat->pclk);
+		clk_prepare_enable(priv->plat->stmmac_clk);
+		clk_prepare_enable(priv->plat->pclk);
+		if (priv->plat->clk_ptp_ref)
+			clk_prepare_enable(priv->plat->clk_ptp_ref);
 		/* reset the phy so that it's ready */
 		if (priv->mii)
 			stmmac_mdio_reset(priv->mii);
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 4f684cb..078027b 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -1140,10 +1140,11 @@ static void atusb_disconnect(struct usb_interface *interface)
 
 	ieee802154_unregister_hw(atusb->hw);
 
+	usb_put_dev(atusb->usb_dev);
+
 	ieee802154_free_hw(atusb->hw);
 
 	usb_set_intfdata(interface, NULL);
-	usb_put_dev(atusb->usb_dev);
 
 	pr_debug("%s done\n", __func__);
 }
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index b2ff903..38a4165 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -3151,12 +3151,12 @@ static int ca8210_probe(struct spi_device *spi_device)
 		goto error;
 	}
 
+	priv->spi->dev.platform_data = pdata;
 	ret = ca8210_get_platform_data(priv->spi, pdata);
 	if (ret) {
 		dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
 		goto error;
 	}
-	priv->spi->dev.platform_data = pdata;
 
 	ret = ca8210_dev_com_init(priv);
 	if (ret) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index a065a61..a291e5f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4474,10 +4474,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
 	struct r8152 *tp = usb_get_intfdata(intf);
 
 	clear_bit(SELECTIVE_SUSPEND, &tp->flags);
-	mutex_lock(&tp->control);
 	tp->rtl_ops.init(tp);
 	queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-	mutex_unlock(&tp->control);
+	set_ethernet_addr(tp);
 	return rtl8152_resume(intf);
 }
 
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 67d98c5..15c76a4 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -967,6 +967,11 @@ static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
 		break;
 	case CNSS_MHI_POWER_ON:
 		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
+		/* -ETIMEDOUT means MHI power up succeeded but timed out waiting
+		 * for the firmware mission mode event, so treat it as success.
+		 */
+		if (ret == -ETIMEDOUT)
+			ret = 0;
 		break;
 	case CNSS_MHI_POWER_OFF:
 		mhi_power_down(pci_priv->mhi_ctrl, true);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 479a4cf..5f998ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -775,6 +775,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
 				return;
 			} else {
 				noa_num = (noa_len - 2) / 13;
+				if (noa_num > P2P_MAX_NOA_NUM)
+					noa_num = P2P_MAX_NOA_NUM;
+
 			}
 			noa_index = ie[3];
 			if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
@@ -869,6 +872,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
 				return;
 			} else {
 				noa_num = (noa_len - 2) / 13;
+				if (noa_num > P2P_MAX_NOA_NUM)
+					noa_num = P2P_MAX_NOA_NUM;
+
 			}
 			noa_index = ie[3];
 			if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 82add0a..27b6b14 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -718,7 +718,6 @@ int xenvif_connect_data(struct xenvif_queue *queue,
 	xenvif_unmap_frontend_data_rings(queue);
 	netif_napi_del(&queue->napi);
 err:
-	module_put(THIS_MODULE);
 	return err;
 }
 
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 2a9d6b0..80508da3 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -1373,7 +1373,7 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
 	int ret;
 
 	/* Get outbound MW parameters and map it */
-	ret = ntb_peer_mw_get_addr(perf->ntb, peer->gidx, &phys_addr,
+	ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
 				   &peer->outbuf_size);
 	if (ret)
 		return ret;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2ba22cd..54a633e 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -189,7 +189,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
 	sector_t sector;
 
 	/* make sure device is a region */
-	if (!is_nd_pmem(dev))
+	if (!is_memory(dev))
 		return 0;
 
 	nd_region = to_nd_region(dev);
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index f9130cc..22224b2 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -42,7 +42,7 @@ static int nd_region_probe(struct device *dev)
 	if (rc)
 		return rc;
 
-	if (is_nd_pmem(&nd_region->dev)) {
+	if (is_memory(&nd_region->dev)) {
 		struct resource ndr_res;
 
 		if (devm_init_badblocks(dev, &nd_region->bb))
@@ -131,7 +131,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
 		struct nd_region *nd_region = to_nd_region(dev);
 		struct resource res;
 
-		if (is_nd_pmem(&nd_region->dev)) {
+		if (is_memory(&nd_region->dev)) {
 			res.start = nd_region->ndr_start;
 			res.end = nd_region->ndr_start +
 				nd_region->ndr_size - 1;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 0303296..609fc45 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -633,11 +633,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
 		return 0;
 
-	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
+	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
 		return 0;
 
 	if (a == &dev_attr_resource.attr) {
-		if (is_nd_pmem(dev))
+		if (is_memory(dev))
 			return 0400;
 		else
 			return 0;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ae0b010..5d0f99b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -111,10 +111,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
 	 */
 	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
 		return;
-	revalidate_disk(ns->disk);
 	blk_set_queue_dying(ns->queue);
 	/* Forcibly unquiesce queues to avoid blocking dispatch */
 	blk_mq_unquiesce_queue(ns->queue);
+	/*
+	 * Revalidate after unblocking dispatchers that may be holding bd_mutex
+	 */
+	revalidate_disk(ns->disk);
 }
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index fd2dbd7..65eaa6b 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -31,6 +31,9 @@
 #define PCI_REG_VMLOCK		0x70
 #define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)
 
+#define MB2_SHADOW_OFFSET	0x2000
+#define MB2_SHADOW_SIZE		16
+
 enum vmd_features {
 	/*
 	 * Device may contain registers which hint the physical location of the
@@ -94,6 +97,7 @@ struct vmd_dev {
 	struct resource		resources[3];
 	struct irq_domain	*irq_domain;
 	struct pci_bus		*bus;
+	u8			busn_start;
 
 #ifdef CONFIG_X86_DEV_DMA_OPS
 	struct dma_map_ops	dma_ops;
@@ -465,7 +469,8 @@ static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
 {
 	char __iomem *addr = vmd->cfgbar +
-			     (bus->number << 20) + (devfn << 12) + reg;
+			     ((bus->number - vmd->busn_start) << 20) +
+			     (devfn << 12) + reg;
 
 	if ((addr - vmd->cfgbar) + len >=
 	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
@@ -588,7 +593,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	unsigned long flags;
 	LIST_HEAD(resources);
 	resource_size_t offset[2] = {0};
-	resource_size_t membar2_offset = 0x2000, busn_start = 0;
+	resource_size_t membar2_offset = 0x2000;
 
 	/*
 	 * Shadow registers may exist in certain VMD device ids which allow
@@ -600,7 +605,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 		u32 vmlock;
 		int ret;
 
-		membar2_offset = 0x2018;
+		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
 		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
 		if (ret || vmlock == ~0)
 			return -ENODEV;
@@ -612,9 +617,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 			if (!membar2)
 				return -ENOMEM;
 			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
-						readq(membar2 + 0x2008);
+					readq(membar2 + MB2_SHADOW_OFFSET);
 			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
-						readq(membar2 + 0x2010);
+					readq(membar2 + MB2_SHADOW_OFFSET + 8);
 			pci_iounmap(vmd->dev, membar2);
 		}
 	}
@@ -630,14 +635,14 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
 		if (BUS_RESTRICT_CAP(vmcap) &&
 		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
-			busn_start = 128;
+			vmd->busn_start = 128;
 	}
 
 	res = &vmd->dev->resource[VMD_CFGBAR];
 	vmd->resources[0] = (struct resource) {
 		.name  = "VMD CFGBAR",
-		.start = busn_start,
-		.end   = busn_start + (resource_size(res) >> 20) - 1,
+		.start = vmd->busn_start,
+		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
 		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
 	};
 
@@ -705,8 +710,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
 	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
 
-	vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
-				       sd, &resources);
+	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
+					&vmd_ops, sd, &resources);
 	if (!vmd->bus) {
 		pci_free_resource_list(&resources);
 		irq_domain_remove(vmd->irq_domain);
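vmd_cfg_addr() above now subtracts the stored busn_start before forming the ECAM-style offset, so that when the guest bus range is restricted to start at 128 the first bus still maps to the beginning of CFGBAR. A small user-space sketch of that address arithmetic (values are hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    /* ECAM-style offset: 1 MiB per bus, 4 KiB per devfn, as in vmd_cfg_addr() */
    static uint64_t cfg_offset(unsigned int bus, unsigned int busn_start,
                               unsigned int devfn, unsigned int reg)
    {
        return ((uint64_t)(bus - busn_start) << 20) |
               ((uint64_t)devfn << 12) | reg;
    }

    int main(void)
    {
        /* With a restricted bus range starting at 128, bus 128 devfn 0 must
         * map to offset 0 of CFGBAR, not 128 MiB beyond it. */
        printf("offset = 0x%llx\n",
               (unsigned long long)cfg_offset(128, 128, 0x00, 0x00));
        printf("offset = 0x%llx\n",
               (unsigned long long)cfg_offset(129, 128, 0x08, 0x10));
        return 0;
    }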
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c654653..2baf1f8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -926,19 +926,6 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 }
 
 /**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
-	if (platform_pci_power_manageable(dev))
-		platform_pci_set_power_state(dev, PCI_D0);
-
-	pci_raw_set_power_state(dev, PCI_D0);
-	pci_update_current_state(dev, PCI_D0);
-}
-
-/**
  * pci_platform_power_transition - Use platform to change device power state
  * @dev: PCI device to handle.
  * @state: State to put the device into.
@@ -1117,6 +1104,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 EXPORT_SYMBOL(pci_set_power_state);
 
 /**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+	__pci_start_power_transition(dev, PCI_D0);
+	pci_raw_set_power_state(dev, PCI_D0);
+	pci_update_current_state(dev, PCI_D0);
+}
+
+/**
  * pci_choose_state - Choose the power state of a PCI device
  * @dev: PCI device to be suspended
  * @state: target sleep state for the whole system. This is the value
@@ -1366,7 +1364,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
 		res = pdev->resource + bar_idx;
-		size = order_base_2((resource_size(res) >> 20) | 1) - 1;
+		size = ilog2(resource_size(res)) - 20;
 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
 		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
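For context on the ReBAR hunk above: the Resizable BAR size field encodes log2(size in MB), so 1 MB is 0, 2 MB is 1, and 256 MB is 8. The old order_base_2((size >> 20) | 1) - 1 expression underflows to -1 for a 1 MB BAR (writing all-ones into the size field), while ilog2(size) - 20 gives the intended encoding. A quick arithmetic check, as a user-space sketch with a local ilog2 stand-in:

    #include <stdio.h>
    #include <stdint.h>

    /* floor(log2(x)) for x > 0, standing in for the kernel's ilog2() */
    static int ilog2_u64(uint64_t x)
    {
        int n = -1;

        while (x) {
            x >>= 1;
            n++;
        }
        return n;
    }

    int main(void)
    {
        uint64_t sizes[] = { 1ULL << 20, 2ULL << 20, 256ULL << 20 };

        for (int i = 0; i < 3; i++) {
            /* Resizable BAR size field: 0 => 1 MB, 1 => 2 MB, ..., 8 => 256 MB */
            printf("%llu MB -> encoding %d\n",
                   (unsigned long long)(sizes[i] >> 20),
                   ilog2_u64(sizes[i]) - 20);
        }
        return 0;
    }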
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 62b15b1..9ee110c 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -8,5 +8,6 @@
 obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qrbtc-sdm845.o
 obj-$(CONFIG_PHY_QCOM_UFS) 		+= phy-qcom-ufs-qmp-v4.o
 obj-$(CONFIG_PHY_QCOM_UFS) 		+= phy-qcom-ufs-qmp-v4-lito.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qmp-v3-660.o
 obj-$(CONFIG_PHY_QCOM_USB_HS) 		+= phy-qcom-usb-hs.o
 obj-$(CONFIG_PHY_QCOM_USB_HSIC) 	+= phy-qcom-usb-hsic.o
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c
new file mode 100644
index 0000000..b3c4271
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v3-660.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v3_660"
+
+static
+int ufs_qcom_phy_qmp_v3_660_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B, bool is_g4)
+{
+	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	if ((major == 0x3) && (minor == 0x001) && (step >= 0x001)) {
+		tbl_A = phy_cal_table_rate_A_3_1_1;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_1);
+	} else {
+		dev_err(ufs_qcom_phy->dev,
+			"%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+			__func__, major, minor, step);
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_init(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy_qmp_v3_660 *phy = phy_get_drvdata(generic_phy);
+	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+	int err;
+
+	err = ufs_qcom_phy_init_clks(phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_exit(struct phy *generic_phy)
+{
+	return 0;
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
+{
+	if (!power_ctrl) {
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+		/*
+		 * Before any transactions involving PHY, ensure PHY knows
+		 * that its analog rail is powered ON.
+		 */
+		mb();
+	}
+}
+
+static inline
+void ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable(struct ufs_qcom_phy *phy,
+						   u32 val)
+{
+	/*
+	 * The v3 PHY does not have a TX_LANE_ENABLE register. Implement
+	 * this function as a no-op so no error is propagated to the caller.
+	 */
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg(struct ufs_qcom_phy *phy,
+						bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* Make sure the RX LineCfg configuration is applied before we return */
+	mb();
+}
+
+static inline void ufs_qcom_phy_qmp_v3_660_start_serdes(
+					struct ufs_qcom_phy *phy)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	tmp &= ~MASK_SERDES_START;
+	tmp |= (1 << OFFSET_SERDES_START);
+	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+	/* Ensure register value is committed */
+	mb();
+}
+
+static int ufs_qcom_phy_qmp_v3_660_is_pcs_ready(
+				struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+		val, (val & MASK_PCS_READY), 10, 1000000);
+	if (err)
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+	return err;
+}
+
+static void ufs_qcom_phy_qmp_v3_660_dbg_register_dump(
+					struct ufs_qcom_phy *phy)
+{
+	ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+					"PHY QSERDES COM Registers ");
+	ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+					"PHY Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE, RX_SIZE,
+					"PHY RX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE, TX_SIZE,
+					"PHY TX0 Registers ");
+}
+
+struct phy_ops ufs_qcom_phy_qmp_v3_660_phy_ops = {
+	.init		= ufs_qcom_phy_qmp_v3_660_init,
+	.exit		= ufs_qcom_phy_qmp_v3_660_exit,
+	.power_on	= ufs_qcom_phy_power_on,
+	.power_off	= ufs_qcom_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+struct ufs_qcom_phy_specific_ops phy_v3_660_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qmp_v3_660_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qmp_v3_660_start_serdes,
+	.is_physical_coding_sublayer_ready =
+				ufs_qcom_phy_qmp_v3_660_is_pcs_ready,
+	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg,
+	.power_control		= ufs_qcom_phy_qmp_v3_660_power_control,
+	.dbg_register_dump	= ufs_qcom_phy_qmp_v3_660_dbg_register_dump,
+};
+
+static int ufs_qcom_phy_qmp_v3_660_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qmp_v3_660 *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+				&ufs_qcom_phy_qmp_v3_660_phy_ops,
+				&phy_v3_660_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_v3_660_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qmp-v3-660"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v3_660_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v3_660_driver = {
+	.probe = ufs_qcom_phy_qmp_v3_660_probe,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qmp_v3_660_of_match,
+		.name = "ufs_qcom_phy_qmp_v3_660",
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v3_660_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v3 660");
+MODULE_LICENSE("GPL v2");
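In the new PHY driver above, start_serdes() performs a read-modify-write of one control field (clear the field under its mask, then OR in the start bit) and is_pcs_ready() polls a status register until the ready bit appears or a timeout expires (10 us poll interval with a 1 s budget in the driver). A plain-C sketch of that poll pattern against an in-memory stand-in for the register; the names and retry budget here are illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <stdint.h>

    #define MASK_READY 0x1u

    /* Fake MMIO word standing in for the PCS ready status register. */
    static volatile uint32_t fake_status;

    /* Poll *reg until (*reg & mask) is set or the retry budget runs out,
     * in the spirit of readl_poll_timeout(); returns 0 or -ETIMEDOUT. */
    static int poll_ready(volatile uint32_t *reg, uint32_t mask, int max_tries)
    {
        for (int i = 0; i < max_tries; i++) {
            if (*reg & mask)
                return 0;
            /* a real driver sleeps between reads (10 us in the PHY code) */
        }
        return -ETIMEDOUT;
    }

    int main(void)
    {
        fake_status = MASK_READY;    /* pretend the PHY came up */
        printf("pcs ready poll: %d\n",
               poll_ready(&fake_status, MASK_READY, 100));
        return 0;
    }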
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h
new file mode 100644
index 0000000..e7ff88b
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2016,2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V3_660_H_
+#define UFS_QCOM_PHY_QMP_V3_660_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_BASE	0x000
+#define COM_OFF(x)	(COM_BASE + x)
+#define COM_SIZE	0x1C0
+
+#define TX_BASE		0x400
+#define TX_OFF(x)	(TX_BASE + x)
+#define TX_SIZE		0x128
+
+#define RX_BASE		0x600
+#define RX_OFF(x)	(RX_BASE + x)
+#define RX_SIZE		0x1FC
+
+#define PHY_BASE	0xC00
+#define PHY_OFF(x)	(PHY_BASE + x)
+#define PHY_SIZE	0x1B4
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1			COM_OFF(0x00)
+#define QSERDES_COM_ATB_SEL2			COM_OFF(0x04)
+#define QSERDES_COM_FREQ_UPDATE			COM_OFF(0x08)
+#define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
+#define QSERDES_COM_SSC_EN_CENTER		COM_OFF(0x10)
+#define QSERDES_COM_SSC_ADJ_PER1		COM_OFF(0x14)
+#define QSERDES_COM_SSC_ADJ_PER2		COM_OFF(0x18)
+#define QSERDES_COM_SSC_PER1			COM_OFF(0x1C)
+#define QSERDES_COM_SSC_PER2			COM_OFF(0x20)
+#define QSERDES_COM_SSC_STEP_SIZE1		COM_OFF(0x24)
+#define QSERDES_COM_SSC_STEP_SIZE2		COM_OFF(0x28)
+#define QSERDES_COM_POST_DIV			COM_OFF(0x2C)
+#define QSERDES_COM_POST_DIV_MUX		COM_OFF(0x30)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
+#define QSERDES_COM_CLK_ENABLE1			COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		COM_OFF(0x40)
+#define QSERDES_COM_PLL_EN			COM_OFF(0x44)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x48)
+#define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0X4C)
+#define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0X50)
+#define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0X54)
+#define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0X58)
+#define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0X5C)
+#define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0X60)
+#define QSERDES_COM_CMD_RSVD0			COM_OFF(0x64)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL	COM_OFF(0x68)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS	COM_OFF(0x6C)
+#define QSERDES_COM_BG_TRIM			COM_OFF(0x70)
+#define QSERDES_COM_CLK_EP_DIV			COM_OFF(0x74)
+#define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x78)
+#define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x7C)
+#define QSERDES_COM_CMN_RSVD1			COM_OFF(0x80)
+#define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x84)
+#define QSERDES_COM_PLL_RCTRL_MODE1		COM_OFF(0x88)
+#define QSERDES_COM_CMN_RSVD2			COM_OFF(0x8C)
+#define QSERDES_COM_PLL_CCTRL_MODE0		COM_OFF(0x90)
+#define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x94)
+#define QSERDES_COM_CMN_RSVD3			COM_OFF(0x98)
+#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x9C)
+#define QSERDES_COM_PHASE_SEL_CTRL		COM_OFF(0xA0)
+#define QSERDES_COM_PHASE_SEL_DC		COM_OFF(0xA4)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		COM_OFF(0xA8)
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0xAC)
+#define QSERDES_COM_CML_SYSCLK_SEL		COM_OFF(0xB0)
+#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0xB4)
+#define QSERDES_COM_RESETSM_CNTRL2		COM_OFF(0xB8)
+#define QSERDES_COM_RESTRIM_CTRL		COM_OFF(0xBC)
+#define QSERDES_COM_RESTRIM_CTRL2		COM_OFF(0xC0)
+#define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0xC8)
+#define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0xCC)
+#define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xD0)
+#define QSERDES_COM_DEC_START_MODE1		COM_OFF(0xD4)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		COM_OFF(0xD8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	COM_OFF(0xE0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	COM_OFF(0xE4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	COM_OFF(0xE8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	COM_OFF(0xEC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	COM_OFF(0xF0)
+#define QSERDES_COM_VCO_TUNE_MINVAL1		COM_OFF(0xF4)
+#define QSERDES_COM_VCO_TUNE_MINVAL2		COM_OFF(0xF8)
+#define QSERDES_COM_CMN_RSVD4			COM_OFF(0xFC)
+#define QSERDES_COM_INTEGLOOP_INITVAL		COM_OFF(0x100)
+#define QSERDES_COM_INTEGLOOP_EN		COM_OFF(0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	COM_OFF(0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	COM_OFF(0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	COM_OFF(0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1		COM_OFF(0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2		COM_OFF(0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2		COM_OFF(0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL		COM_OFF(0x124)
+#define QSERDES_COM_VCO_TUNE_MAP		COM_OFF(0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0		COM_OFF(0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_INITVAL1		COM_OFF(0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x148)
+#define QSERDES_COM_SAR				COM_OFF(0x14C)
+#define QSERDES_COM_SAR_CLK			COM_OFF(0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS		COM_OFF(0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS	COM_OFF(0x158)
+#define QSERDES_COM_CMN_STATUS			COM_OFF(0x15C)
+#define QSERDES_COM_RESET_SM_STATUS		COM_OFF(0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS		COM_OFF(0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS		COM_OFF(0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS		COM_OFF(0x16C)
+#define QSERDES_COM_BG_CTRL			COM_OFF(0x170)
+#define QSERDES_COM_CLK_SELECT			COM_OFF(0x174)
+#define QSERDES_COM_HSCLK_SEL			COM_OFF(0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS	COM_OFF(0x17C)
+#define QSERDES_COM_PLL_ANALOG			COM_OFF(0x180)
+#define QSERDES_COM_CORECLK_DIV			COM_OFF(0x184)
+#define QSERDES_COM_SW_RESET			COM_OFF(0x188)
+#define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x18C)
+#define QSERDES_COM_C_READY_STATUS		COM_OFF(0x190)
+#define QSERDES_COM_CMN_CONFIG			COM_OFF(0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE		COM_OFF(0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x19C)
+#define QSERDES_COM_DEBUG_BUS0			COM_OFF(0x1A0)
+#define QSERDES_COM_DEBUG_BUS1			COM_OFF(0x1A4)
+#define QSERDES_COM_DEBUG_BUS2			COM_OFF(0x1A8)
+#define QSERDES_COM_DEBUG_BUS3			COM_OFF(0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL		COM_OFF(0x1B0)
+#define QSERDES_COM_CMN_MISC1			COM_OFF(0x1B4)
+#define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x1BC)
+#define QSERDES_COM_CMN_RSVD5			COM_OFF(0x1C0)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START			PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x3C)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP	PHY_OFF(0xCC)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x138)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x13C)
+#define UFS_PHY_RX_MIN_HIBERN8_TIME		PHY_OFF(0x140)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x148)
+#define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x154)
+#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x168)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN	TX_OFF(0x68)
+#define	QSERDES_TX_LANE_MODE				TX_OFF(0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF	RX_OFF(0x30)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER	RX_OFF(0x34)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH	RX_OFF(0x38)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN		RX_OFF(0x3C)
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN	RX_OFF(0x40)
+#define QSERDES_RX_UCDR_SO_SATURATION_ENABLE	RX_OFF(0x48)
+#define QSERDES_RX_RX_TERM_BW			RX_OFF(0x90)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB		RX_OFF(0xC4)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB		RX_OFF(0xC8)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB		RX_OFF(0xCC)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB		RX_OFF(0xD0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2	RX_OFF(0xD8)
+#define QSERDES_RX_SIGDET_CNTRL			RX_OFF(0x114)
+#define QSERDES_RX_SIGDET_LVL			RX_OFF(0x118)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL	RX_OFF(0x11C)
+#define QSERDES_RX_RX_INTERFACE_MODE		RX_OFF(0x12C)
+
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
+/*
+ * This structure represents the v3 660 specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_v3_660 {
+	struct ufs_qcom_phy common_cfg;
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_1[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0x9A), /* 8 us */
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+#endif
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index b7e272d..227646e 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1524,7 +1524,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1532,7 +1531,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1540,7 +1538,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1548,7 +1545,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{}
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index aa48b3f..3aac640 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
 	PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
 		      BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
 		      18, 2, "gpio", "uart"),
-	PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
-	PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
-	PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
-	PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+	PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+	PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+	PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+	PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
 
 };
 
@@ -218,11 +218,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
 };
 
 static inline void armada_37xx_update_reg(unsigned int *reg,
-					  unsigned int offset)
+					  unsigned int *offset)
 {
 	/* We never have more than 2 registers */
-	if (offset >= GPIO_PER_REG) {
-		offset -= GPIO_PER_REG;
+	if (*offset >= GPIO_PER_REG) {
+		*offset -= GPIO_PER_REG;
 		*reg += sizeof(u32);
 	}
 }
@@ -373,7 +373,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
 {
 	int offset = irqd_to_hwirq(d);
 
-	armada_37xx_update_reg(reg, offset);
+	armada_37xx_update_reg(reg, &offset);
 }
 
 static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
@@ -383,7 +383,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
 	unsigned int reg = OUTPUT_EN;
 	unsigned int mask;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 
 	return regmap_update_bits(info->regmap, reg, mask, 0);
@@ -396,7 +396,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
 	unsigned int reg = OUTPUT_EN;
 	unsigned int val, mask;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 	regmap_read(info->regmap, reg, &val);
 
@@ -410,7 +410,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
 	unsigned int reg = OUTPUT_EN;
 	unsigned int mask, val, ret;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 
 	ret = regmap_update_bits(info->regmap, reg, mask, mask);
@@ -431,7 +431,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
 	unsigned int reg = INPUT_VAL;
 	unsigned int val, mask;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 
 	regmap_read(info->regmap, reg, &val);
@@ -446,7 +446,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
 	unsigned int reg = OUTPUT_VAL;
 	unsigned int mask, val;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 	val = value ? mask : 0;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 55dc102..1b0d857 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -440,24 +440,24 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 	int i;
 
 	if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK)
-		pr_err("is_pure_ack ");
+		pr_cont("is_pure_ack ");
 
 	if (attrib->attrib_mask & IPA_FLT_TOS)
-		pr_err("tos:%d ", attrib->u.v4.tos);
+		pr_cont("tos:%d ", attrib->u.v4.tos);
 
 	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
-		pr_err("tos_value:%d ", attrib->tos_value);
-		pr_err("tos_mask:%d ", attrib->tos_mask);
+		pr_cont("tos_value:%d ", attrib->tos_value);
+		pr_cont("tos_mask:%d ", attrib->tos_mask);
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_PROTOCOL)
-		pr_err("protocol:%d ", attrib->u.v4.protocol);
+		pr_cont("protocol:%d ", attrib->u.v4.protocol);
 
 	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
 		if (ip == IPA_IP_v4) {
 			addr[0] = htonl(attrib->u.v4.src_addr);
 			mask[0] = htonl(attrib->u.v4.src_addr_mask);
-			pr_err(
+			pr_cont(
 					"src_addr:%pI4 src_addr_mask:%pI4 ",
 					addr + 0, mask + 0);
 		} else if (ip == IPA_IP_v6) {
@@ -465,7 +465,7 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 				addr[i] = htonl(attrib->u.v6.src_addr[i]);
 				mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
 			}
-			pr_err(
+			pr_cont(
 					   "src_addr:%pI6 src_addr_mask:%pI6 ",
 					   addr + 0, mask + 0);
 		}
@@ -474,7 +474,7 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 		if (ip == IPA_IP_v4) {
 			addr[0] = htonl(attrib->u.v4.dst_addr);
 			mask[0] = htonl(attrib->u.v4.dst_addr_mask);
-			pr_err(
+			pr_cont(
 					   "dst_addr:%pI4 dst_addr_mask:%pI4 ",
 					   addr + 0, mask + 0);
 		} else if (ip == IPA_IP_v6) {
@@ -482,81 +482,84 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 				addr[i] = htonl(attrib->u.v6.dst_addr[i]);
 				mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
 			}
-			pr_err(
+			pr_cont(
 					   "dst_addr:%pI6 dst_addr_mask:%pI6 ",
 					   addr + 0, mask + 0);
 		}
 	}
 	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
-		pr_err("src_port_range:%u %u ",
+		pr_cont("src_port_range:%u %u ",
 				   attrib->src_port_lo,
 			     attrib->src_port_hi);
 	}
 	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
-		pr_err("dst_port_range:%u %u ",
+		pr_cont("dst_port_range:%u %u ",
 				   attrib->dst_port_lo,
 			     attrib->dst_port_hi);
 	}
 	if (attrib->attrib_mask & IPA_FLT_TYPE)
-		pr_err("type:%d ", attrib->type);
+		pr_cont("type:%d ", attrib->type);
 
 	if (attrib->attrib_mask & IPA_FLT_CODE)
-		pr_err("code:%d ", attrib->code);
+		pr_cont("code:%d ", attrib->code);
 
 	if (attrib->attrib_mask & IPA_FLT_SPI)
-		pr_err("spi:%x ", attrib->spi);
+		pr_cont("spi:%x ", attrib->spi);
 
 	if (attrib->attrib_mask & IPA_FLT_SRC_PORT)
-		pr_err("src_port:%u ", attrib->src_port);
+		pr_cont("src_port:%u ", attrib->src_port);
 
 	if (attrib->attrib_mask & IPA_FLT_DST_PORT)
-		pr_err("dst_port:%u ", attrib->dst_port);
+		pr_cont("dst_port:%u ", attrib->dst_port);
 
 	if (attrib->attrib_mask & IPA_FLT_TC)
-		pr_err("tc:%d ", attrib->u.v6.tc);
+		pr_cont("tc:%d ", attrib->u.v6.tc);
 
 	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL)
-		pr_err("flow_label:%x ", attrib->u.v6.flow_label);
+		pr_cont("flow_label:%x ", attrib->u.v6.flow_label);
 
 	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR)
-		pr_err("next_hdr:%d ", attrib->u.v6.next_hdr);
+		pr_cont("next_hdr:%d ", attrib->u.v6.next_hdr);
 
 	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
-		pr_err(
+		pr_cont(
 				   "metadata:%x metadata_mask:%x ",
 				   attrib->meta_data, attrib->meta_data_mask);
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
-		pr_err("frg ");
+		pr_cont("frg ");
 
 	if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
 		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
-		pr_err("src_mac_addr:%pM ", attrib->src_mac_addr);
+		pr_cont("src_mac_addr:%pM ", attrib->src_mac_addr);
 	}
 
 	if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
 		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) ||
 		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) {
-		pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
+		pr_cont("dst_mac_addr:%pM ", attrib->dst_mac_addr);
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
-		pr_err("ether_type:%x ", attrib->ether_type);
+		pr_cont("ether_type:%x ", attrib->ether_type);
+
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID)
+		pr_cont("vlan_id:%x ", attrib->vlan_id);
 
 	if (attrib->attrib_mask & IPA_FLT_TCP_SYN)
-		pr_err("tcp syn ");
+		pr_cont("tcp syn ");
 
 	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP)
-		pr_err("tcp syn l2tp ");
+		pr_cont("tcp syn l2tp ");
 
 	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE)
-		pr_err("l2tp inner ip type: %d ", attrib->type);
+		pr_cont("l2tp inner ip type: %d ", attrib->type);
 
 	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
 		addr[0] = htonl(attrib->u.v4.dst_addr);
 		mask[0] = htonl(attrib->u.v4.dst_addr_mask);
-		pr_err("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask);
+		pr_cont("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask);
 	}
 
 	pr_err("\n");
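The ipa3_attrib_dump() hunks above switch the attribute fragments from pr_err() to pr_cont() so the whole rule prints as one log line, terminated by the final pr_err("\n"), instead of emitting one line per attribute. A trivial user-space analogue of building a single line from optional fragments (illustrative only):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int attrib_mask = 0x5;    /* pretend two attributes are set */
        char line[128] = "";

        /* Collect fragments, then emit one line, the way pr_cont() lets the
         * kernel append to the message started by the previous printk(). */
        if (attrib_mask & 0x1)
            strcat(line, "tos:3 ");
        if (attrib_mask & 0x4)
            strcat(line, "protocol:17 ");

        fprintf(stderr, "%s\n", line);
        return 0;
    }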
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 0e13163..876c052 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -771,6 +771,26 @@ static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
 }
 
 /**
+ * __ipa3_update_curr_poll_state() - update the current polling state for
+ *                                   the default WAN and coalescing pipes.
+ * In RSC/RSB enabled cases both pipes use a common event ring, so their
+ * polling states must be kept in sync.
+ */
+void __ipa3_update_curr_poll_state(enum ipa_client_type client, int state)
+{
+	int ep_idx = IPA_EP_NOT_ALLOCATED;
+
+	if (client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	if (client == IPA_CLIENT_APPS_WAN_CONS)
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+
+	if (ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[ep_idx].sys)
+		atomic_set(&ipa3_ctx->ep[ep_idx].sys->curr_polling_state,
+									state);
+}
+
+/**
  * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
  */
 static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
@@ -778,6 +798,8 @@ static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
 	int ret;
 
 	atomic_set(&sys->curr_polling_state, 0);
+	__ipa3_update_curr_poll_state(sys->ep->client, 0);
+
 	ipa3_dec_release_wakelock();
 	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
 		GSI_CHAN_MODE_CALLBACK);
@@ -786,8 +808,10 @@ static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
 		if (ret == -GSI_STATUS_PENDING_IRQ) {
 			ipa3_inc_acquire_wakelock();
 			atomic_set(&sys->curr_polling_state, 1);
+			__ipa3_update_curr_poll_state(sys->ep->client, 1);
 		} else {
-			IPAERR("Failed to switch to intr mode.\n");
+			IPAERR("Failed to switch to intr mode %d ch_id %d\n",
+			atomic_read(&sys->curr_polling_state), sys->ep->gsi_chan_hdl);
 		}
 	}
 
@@ -4301,6 +4325,8 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
 	bool clk_off;
 
 	atomic_set(&sys->curr_polling_state, 1);
+	__ipa3_update_curr_poll_state(sys->ep->client, 1);
+
 	ipa3_inc_acquire_wakelock();
 
 	/*
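__ipa3_update_curr_poll_state(), added in the hunks above, mirrors the polling state between the default WAN and coalescing consumer pipes because, with RSC/RSB enabled, both are driven by one shared GSI event ring. A tiny sketch of keeping two mirrored flags in sync from a single helper; the atomics here are user-space stand-ins and the names are illustrative:

    #include <stdio.h>
    #include <stdatomic.h>

    enum client { WAN_CONS, WAN_COAL_CONS };

    static atomic_int polling_state[2];

    /* Whichever pipe changes state, its partner on the shared event ring is
     * set to the same value, as __ipa3_update_curr_poll_state() does. */
    static void set_polling(enum client c, int state)
    {
        enum client peer = (c == WAN_CONS) ? WAN_COAL_CONS : WAN_CONS;

        atomic_store(&polling_state[c], state);
        atomic_store(&polling_state[peer], state);
    }

    int main(void)
    {
        set_polling(WAN_COAL_CONS, 1);
        printf("wan=%d coal=%d\n",
               atomic_load(&polling_state[WAN_CONS]),
               atomic_load(&polling_state[WAN_COAL_CONS]));
        return 0;
    }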
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
index 3458de6..31c82da 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
@@ -252,7 +252,7 @@ int ipa_hw_stats_init(void)
 	return ret;
 }
 
-static void ipa_close_coal_frame(struct ipahal_imm_cmd_pyld *coal_cmd_pyld)
+static void ipa_close_coal_frame(struct ipahal_imm_cmd_pyld **coal_cmd_pyld)
 {
 	int i;
 	struct ipahal_reg_valmask valmask;
@@ -266,7 +266,7 @@ static void ipa_close_coal_frame(struct ipahal_imm_cmd_pyld *coal_cmd_pyld)
 	ipahal_get_aggr_force_close_valmask(i, &valmask);
 	reg_write_coal_close.value = valmask.val;
 	reg_write_coal_close.value_mask = valmask.mask;
-	coal_cmd_pyld = ipahal_construct_imm_cmd(
+	*coal_cmd_pyld = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_REGISTER_WRITE,
 		&reg_write_coal_close, false);
 }
@@ -321,7 +321,7 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
-		ipa_close_coal_frame(coal_cmd_pyld);
+		ipa_close_coal_frame(&coal_cmd_pyld);
 		if (!coal_cmd_pyld) {
 			IPAERR("failed to construct coal close IC\n");
 			ret = -ENOMEM;
@@ -459,7 +459,7 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
-		ipa_close_coal_frame(cmd_pyld[num_cmd]);
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
 		if (!cmd_pyld[num_cmd]) {
 			IPAERR("failed to construct coal close IC\n");
 			ret = -ENOMEM;
@@ -666,7 +666,7 @@ int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
-		ipa_close_coal_frame(coal_cmd_pyld);
+		ipa_close_coal_frame(&coal_cmd_pyld);
 		if (!coal_cmd_pyld) {
 			IPAERR("failed to construct coal close IC\n");
 			ret = -ENOMEM;
@@ -811,7 +811,7 @@ int ipa_get_teth_stats(void)
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
-		ipa_close_coal_frame(cmd_pyld[num_cmd]);
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
 		if (!cmd_pyld[num_cmd]) {
 			IPAERR("failed to construct coal close IC\n");
 			ret = -ENOMEM;
@@ -1095,7 +1095,7 @@ int ipa_init_flt_rt_stats(void)
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
-		ipa_close_coal_frame(coal_cmd_pyld);
+		ipa_close_coal_frame(&coal_cmd_pyld);
 		if (!coal_cmd_pyld) {
 			IPAERR("failed to construct coal close IC\n");
 			ret = -ENOMEM;
@@ -1294,7 +1294,7 @@ static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
-		ipa_close_coal_frame(cmd_pyld[num_cmd]);
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
 		if (!cmd_pyld[num_cmd]) {
 			IPAERR("failed to construct coal close IC\n");
 			ret = -ENOMEM;
@@ -1552,7 +1552,7 @@ int ipa_init_drop_stats(u32 pipe_bitmask)
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
-		ipa_close_coal_frame(coal_cmd_pyld);
+		ipa_close_coal_frame(&coal_cmd_pyld);
 		if (!coal_cmd_pyld) {
 			IPAERR("failed to construct coal close IC\n");
 			ret = -ENOMEM;
@@ -1691,7 +1691,7 @@ int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
-		ipa_close_coal_frame(cmd_pyld[num_cmd]);
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
 		if (!cmd_pyld[num_cmd]) {
 			IPAERR("failed to construct coal close IC\n");
 			ret = -ENOMEM;
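The ipa_close_coal_frame() change above is a classic output-parameter fix: the helper previously assigned to its own copy of the payload pointer, so the caller's coal_cmd_pyld stayed NULL and the following NULL check always reported a failure. Passing struct ipahal_imm_cmd_pyld ** lets the helper write through to the caller. A minimal user-space reproduction of the bug and the fix, with placeholder types and names:

    #include <stdio.h>
    #include <stdlib.h>

    struct payload { int id; };

    static struct payload *construct_cmd(void)
    {
        struct payload *p = malloc(sizeof(*p));

        if (p)
            p->id = 42;
        return p;
    }

    /* Buggy: only the local copy of the pointer is updated (and the
     * allocation leaks); the caller never sees the object. */
    static void make_cmd_by_value(struct payload *out)
    {
        out = construct_cmd();
    }

    /* Fixed: write through a pointer-to-pointer so the caller gets it. */
    static void make_cmd_by_ref(struct payload **out)
    {
        *out = construct_cmd();
    }

    int main(void)
    {
        struct payload *cmd = NULL;

        make_cmd_by_value(cmd);
        printf("by value: cmd is %s\n", cmd ? "set" : "still NULL");

        make_cmd_by_ref(&cmd);
        printf("by ref  : cmd is %s (id=%d)\n",
               cmd ? "set" : "NULL", cmd ? cmd->id : -1);
        free(cmd);
        return 0;
    }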
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index d2f4090..79ca342 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -2322,16 +2322,24 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		}
 		if (ul_prod != IPA_CLIENT_MAX) {
 			/* No teth started yet, disable UL channel */
+			ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+				IPA_MPM_ERR("fail to alloc EP.\n");
+				goto fail_stop_channel;
+			}
 			ret = ipa3_stop_gsi_channel(ipa_ep_idx);
 			if (ret) {
 				IPA_MPM_ERR("MHIP Stop channel err = %d\n",
 					ret);
 				goto fail_stop_channel;
 			}
+			ipa_mpm_change_gsi_state(probe_id,
+				IPA_MPM_MHIP_CHAN_UL,
+				GSI_STOPPED);
 		}
 		if (is_acted)
-			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id, true,
-						&is_acted);
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+							true, &is_acted);
 		break;
 	case IPA_MPM_TETH_INPROGRESS:
 	case IPA_MPM_TETH_CONNECTED:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 3861830..e12fdf4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -7704,7 +7704,7 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
 
 static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 {
-	int ipa_ep_idx;
+	int ipa_ep_idx, coal_ep_idx;
 	struct ipa3_ep_context *ep;
 	int res;
 
@@ -7723,16 +7723,16 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 	if (!ep->valid)
 		return 0;
 
+	coal_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+
 	IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", ipa_ep_idx);
+
 	/*
-	 * move the channel to callback mode.
-	 * This needs to happen before starting the channel to make
-	 * sure we don't loose any interrupt
+	 * Configure callback mode only once, after starting the channel.
+	 * Otherwise an IEOB interrupt can be received before callback mode
+	 * is configured a second time, which leads to a race while updating
+	 * the current polling state.
 	 */
-	if (!suspend && !atomic_read(&ep->sys->curr_polling_state) &&
-		!IPA_CLIENT_IS_APPS_PROD(client))
-		gsi_config_channel_mode(ep->gsi_chan_hdl,
-					GSI_CHAN_MODE_CALLBACK);
 
 	if (suspend) {
 		res = __ipa3_stop_gsi_channel(ipa_ep_idx);
@@ -7749,7 +7749,17 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 	}
 
 	/* Apps prod pipes use common event ring so cannot configure mode*/
-	if (IPA_CLIENT_IS_APPS_PROD(client))
+
+	/*
+	 * Skip configuring the mode for the default WAN pipe, as both
+	 * pipes use a common event ring. If both pipes configure the
+	 * same event ring, a race condition is observed while updating
+	 * the current polling state.
+	 */
+
+	if (IPA_CLIENT_IS_APPS_PROD(client) ||
+		(client == IPA_CLIENT_APPS_WAN_CONS &&
+			coal_ep_idx != IPA_EP_NOT_ALLOCATED))
 		return 0;
 
 	if (suspend) {
@@ -7805,18 +7815,29 @@ void ipa3_force_close_coal(void)
 int ipa3_suspend_apps_pipes(bool suspend)
 {
 	int res;
-	enum ipa_client_type client;
 
 	if (suspend)
 		ipa3_force_close_coal();
 
-	for (client = 0; client < IPA_CLIENT_MAX; client++) {
-		if (IPA_CLIENT_IS_APPS_CONS(client)) {
-			res = _ipa_suspend_resume_pipe(client, suspend);
-			if (res)
-				goto undo_cons;
-		}
-	}
+	/* As per the HPG, the coalescing channel must be started/stopped
+	 * before the default one. The coalescing client enum value is
+	 * greater than the default one, so it is handled explicitly first.
+	 */
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_coal_cons;
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_wan_cons;
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_lan_cons;
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_odl_cons;
 
 	if (suspend) {
 		struct ipahal_reg_tx_wrapper tx;
@@ -7832,7 +7853,7 @@ int ipa3_suspend_apps_pipes(bool suspend)
 			IPADBG("COAL frame is open 0x%x\n",
 				tx.coal_slave_open_frame);
 			res = -EAGAIN;
-			goto undo_cons;
+			goto undo_odl_cons;
 		}
 
 		usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
@@ -7841,28 +7862,37 @@ int ipa3_suspend_apps_pipes(bool suspend)
 			ipa3_ctx->ee);
 		if (res) {
 			IPADBG("suspend irq is pending 0x%x\n", res);
-			goto undo_cons;
+			goto undo_odl_cons;
 		}
 	}
 do_prod:
-	for (client = 0; client < IPA_CLIENT_MAX; client++) {
-		if (IPA_CLIENT_IS_APPS_PROD(client)) {
-			res = _ipa_suspend_resume_pipe(client, suspend);
-			if (res)
-				goto undo_prod;
-		}
-	}
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, suspend);
+	if (res == -EAGAIN)
+		goto undo_lan_prod;
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, suspend);
+	if (res == -EAGAIN)
+		goto undo_wan_prod;
 
 	return 0;
-undo_prod:
-	for (client; client <= IPA_CLIENT_MAX && client >= 0; client--)
-		if (IPA_CLIENT_IS_APPS_PROD(client))
-			_ipa_suspend_resume_pipe(client, !suspend);
-	client = IPA_CLIENT_MAX;
-undo_cons:
-	for (client; client <= IPA_CLIENT_MAX && client >= 0; client--)
-		if (IPA_CLIENT_IS_APPS_CONS(client))
-			_ipa_suspend_resume_pipe(client, !suspend);
+
+undo_wan_prod:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, !suspend);
+
+undo_lan_prod:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, !suspend);
+
+undo_odl_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, !suspend);
+undo_lan_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, !suspend);
+undo_wan_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, !suspend);
+	return res;
+
+undo_coal_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
+
 	return res;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index d84646f..8d6cb38 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -847,6 +847,31 @@ static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
 		*rest = ipa_write_8(mac_addr[i], *rest);
 }
 
+static inline int ipa_fltrt_generate_vlan_hw_rule_bdy(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 *ofst_meq32, u8 **extra, u8 **rest)
+{
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID) {
+		uint32_t vlan_tag;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, *ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[*ofst_meq32]);
+		/* -6 => offset of 802_1Q tag in L2 hdr */
+		*extra = ipa_write_8((u8)-6, *extra);
+		/* filter vlan packets: 0x8100 TPID + required VLAN ID */
+		vlan_tag = (0x8100 << 16) | (attrib->vlan_id & 0xFFF);
+		*rest = ipa_write_32(0xFFFF0FFF, *rest);
+		*rest = ipa_write_32(vlan_tag, *rest);
+		(*ofst_meq32)++;
+	}
+
+	return 0;
+}
+
 static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
 	const struct ipa_rule_attrib *attrib,
 	u8 **extra_wrds, u8 **rest_wrds)
@@ -1036,6 +1061,10 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
 		}
 	}
 
+	if (ipa_fltrt_generate_vlan_hw_rule_bdy(en_rule, attrib, &ofst_meq32,
+		&extra, &rest))
+		goto err;
+
 	if (attrib->attrib_mask & IPA_FLT_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
 			ihl_ofst_meq32)) {
@@ -1422,6 +1451,10 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
 		ofst_meq32++;
 	}
 
+	if (ipa_fltrt_generate_vlan_hw_rule_bdy(en_rule, attrib, &ofst_meq32,
+		&extra, &rest))
+		goto err;
+
 	if (attrib->attrib_mask & IPA_FLT_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
 			ihl_ofst_meq32)) {
@@ -2046,6 +2079,31 @@ static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
 			mac_addr[i];
 }
 
+static inline int ipa_flt_generat_vlan_eq(
+	const struct ipa_rule_attrib *attrib, u16 *en_rule, u8 *ofst_meq32,
+	struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID) {
+		uint32_t vlan_tag;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, *ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[*ofst_meq32]);
+		/* -6 => offset of 802_1Q tag in L2 hdr */
+		eq_atrb->offset_meq_32[*ofst_meq32].offset = -6;
+		/* filter vlan packets: 0x8100 TPID + required VLAN ID */
+		vlan_tag = (0x8100 << 16) | (attrib->vlan_id & 0xFFF);
+		eq_atrb->offset_meq_32[*ofst_meq32].mask = 0xFFFF0FFF;
+		eq_atrb->offset_meq_32[*ofst_meq32].value = vlan_tag;
+		(*ofst_meq32)++;
+	}
+
+	return 0;
+}
+
 static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
 		const struct ipa_rule_attrib *attrib,
 		struct ipa_ipfltri_rule_eq *eq_atrb)
@@ -2282,6 +2340,9 @@ static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
 		}
 	}
 
+	if (ipa_flt_generat_vlan_eq(attrib, en_rule, &ofst_meq32, eq_atrb))
+		return -EPERM;
+
 	if (attrib->attrib_mask & IPA_FLT_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
 			ihl_ofst_meq32)) {
@@ -2770,6 +2831,9 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
 		ofst_meq32++;
 	}
 
+	if (ipa_flt_generat_vlan_eq(attrib, en_rule, &ofst_meq32, eq_atrb))
+		return -EPERM;
+
 	if (attrib->attrib_mask & IPA_FLT_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
 			ihl_ofst_meq32)) {
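The VLAN equations added above match the 32-bit word that starts 6 bytes before the L3 header, i.e. the 802.1Q tag: value (0x8100 << 16) | vlan_id and mask 0xFFFF0FFF require the TPID and the 12-bit VLAN ID to match while ignoring the PCP/DEI bits. A quick user-space check of how that value/mask pair behaves:

    #include <stdio.h>
    #include <stdint.h>

    /* Does a captured 802.1Q tag word match the rule under its mask?
     * Mirrors the meq32 equation built for IPA_FLT_VLAN_ID. */
    static int vlan_match(uint32_t tag_word, uint16_t vlan_id)
    {
        uint32_t mask  = 0xFFFF0FFF;                 /* TPID + VID, ignore PCP/DEI */
        uint32_t value = (0x8100u << 16) | (vlan_id & 0xFFF);

        return (tag_word & mask) == value;
    }

    int main(void)
    {
        /* TPID 0x8100, PCP 5, DEI 0, VID 100 -> 0x8100A064 */
        uint32_t tagged = 0x8100A064;

        printf("VID 100: %d\n", vlan_match(tagged, 100));    /* 1: PCP ignored */
        printf("VID 200: %d\n", vlan_match(tagged, 200));    /* 0 */
        return 0;
    }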
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index e9a7d6c..fe0d9a7 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -465,6 +465,7 @@ static struct device_attribute power_supply_attrs[] = {
 	POWER_SUPPLY_ATTR(adapter_cc_mode),
 	POWER_SUPPLY_ATTR(skin_health),
 	POWER_SUPPLY_ATTR(aicl_done),
+	POWER_SUPPLY_ATTR(voltage_step),
 	/* Charge pump properties */
 	POWER_SUPPLY_ATTR(cp_status1),
 	POWER_SUPPLY_ATTR(cp_status2),
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 81d6547..6f58167 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -94,12 +94,11 @@ struct pl_data {
 	struct class		qcom_batt_class;
 	struct wakeup_source	*pl_ws;
 	struct notifier_block	nb;
+	struct charger_param	*chg_param;
 	bool			pl_disable;
 	bool			cp_disabled;
 	int			taper_entry_fv;
 	int			main_fcc_max;
-	int			fcc_step_size_ua;
-	int			fcc_step_delay_ms;
 	/* debugfs directory */
 	struct dentry		*dfs_root;
 	u32			float_voltage_uv;
@@ -143,6 +142,15 @@ enum {
 /*********
  * HELPER*
  *********/
+static bool is_usb_available(struct pl_data *chip)
+{
+	if (!chip->usb_psy)
+		chip->usb_psy =
+			power_supply_get_by_name("usb");
+
+	return !!chip->usb_psy;
+}
+
 static bool is_cp_available(struct pl_data *chip)
 {
 	if (!chip->cp_master_psy)
@@ -196,9 +204,12 @@ static int cp_get_parallel_mode(struct pl_data *chip, int mode)
  */
 static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
 {
-	int rc, fcc;
+	int rc, fcc, main_icl, target_icl = chip->chg_param->hvdcp3_max_icl_ua;
 	union power_supply_propval pval = {0, };
 
+	if (!is_usb_available(chip))
+		return;
+
 	if (!is_cp_available(chip))
 		return;
 
@@ -206,6 +217,31 @@ static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
 					== POWER_SUPPLY_PL_OUTPUT_VPH)
 		return;
 
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_REAL_TYPE, &pval);
+	if (rc < 0)
+		return;
+
+	/*
+	 * For HVDCP3 adapters limit max. ILIM based on DT configuration
+	 * of HVDCP3 ICL value.
+	 * Input VBUS:
+	 * target_icl = HVDCP3_ICL - main_ICL
+	 * Input VMID
+	 * target_icl = HVDCP3_ICL
+	 */
+	if (pval.intval == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+		if (((cp_get_parallel_mode(chip, PARALLEL_INPUT_MODE))
+					== POWER_SUPPLY_PL_USBIN_USBIN)) {
+			main_icl = get_effective_result_locked(
+							chip->usb_icl_votable);
+			if ((main_icl >= 0) && (main_icl < target_icl))
+				target_icl -= main_icl;
+		}
+
+		ilim = min(target_icl, ilim);
+	}
+
 	rc = power_supply_get_property(chip->cp_master_psy,
 				POWER_SUPPLY_PROP_MIN_ICL, &pval);
 	if (rc < 0)
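
In the USBIN-USBIN case the charge pump draws from the same adapter budget as the main charger, so the target is the DT-configured HVDCP3 ICL minus the effective main ICL; for VMID input the full HVDCP3 ICL applies, and the requested ILIM is then clamped with min(). A worked example with assumed numbers (3 A adapter limit, 1 A main ICL — not values taken from the patch):

/* Illustrative arithmetic only; all figures are example values. */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int hvdcp3_max_icl_ua = 3000000;  /* qcom,hvdcp3-max-icl-ua */
	int main_icl_ua = 1000000;        /* effective USB_ICL vote */
	int requested_ilim_ua = 2500000;  /* ILIM requested by the voter */

	/* USBIN-USBIN: main charger and CP share the adapter budget. */
	int target_icl_ua = hvdcp3_max_icl_ua - main_icl_ua;  /* 2 A */
	int ilim_ua = MIN(target_icl_ua, requested_ilim_ua);  /* 2 A */

	printf("CP ILIM clamped to %d uA\n", ilim_ua);
	return 0;
}
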
@@ -227,6 +263,10 @@ static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
 			vote(chip->cp_ilim_votable, voter, true, pval.intval);
 		else
 			vote(chip->cp_ilim_votable, voter, true, ilim);
+
+		pl_dbg(chip, PR_PARALLEL,
+			"ILIM: vote: %d voter:%s min_ilim=%d fcc = %d\n",
+			ilim, voter, pval.intval, fcc);
 	}
 }
 
@@ -523,8 +563,6 @@ ATTRIBUTE_GROUPS(batt_class);
  *  FCC  *
  **********/
 #define EFFICIENCY_PCT	80
-#define DEFAULT_FCC_STEP_SIZE_UA 100000
-#define DEFAULT_FCC_STEP_UPDATE_DELAY_MS 1000
 #define STEP_UP 1
 #define STEP_DOWN -1
 static void get_fcc_split(struct pl_data *chip, int total_ua,
@@ -627,7 +665,7 @@ static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
 {
 	int main_set_fcc_ua, total_fcc_ua;
 
-	if (!chip->fcc_step_size_ua) {
+	if (!chip->chg_param->fcc_step_size_ua) {
 		pr_err("Invalid fcc stepper step size, value 0\n");
 		return;
 	}
@@ -658,16 +696,18 @@ static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
 	chip->main_step_fcc_dir = (main_fcc_ua > chip->main_fcc_ua) ?
 				STEP_UP : STEP_DOWN;
 	chip->main_step_fcc_count = abs((main_fcc_ua - chip->main_fcc_ua) /
-				chip->fcc_step_size_ua);
+				chip->chg_param->fcc_step_size_ua);
 	chip->main_step_fcc_residual = abs((main_fcc_ua - chip->main_fcc_ua) %
-				chip->fcc_step_size_ua);
+				chip->chg_param->fcc_step_size_ua);
 
 	chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ?
 				STEP_UP : STEP_DOWN;
-	chip->parallel_step_fcc_count = abs((parallel_fcc_ua -
-				chip->slave_fcc_ua) / chip->fcc_step_size_ua);
-	chip->parallel_step_fcc_residual = abs((parallel_fcc_ua -
-				chip->slave_fcc_ua) % chip->fcc_step_size_ua);
+	chip->parallel_step_fcc_count
+				= abs((parallel_fcc_ua - chip->slave_fcc_ua) /
+					chip->chg_param->fcc_step_size_ua);
+	chip->parallel_step_fcc_residual
+				= abs((parallel_fcc_ua - chip->slave_fcc_ua) %
+					chip->chg_param->fcc_step_size_ua);
 
 skip_fcc_step_update:
 	if (chip->parallel_step_fcc_count || chip->parallel_step_fcc_residual
@@ -958,19 +998,20 @@ static void fcc_stepper_work(struct work_struct *work)
 	}
 
 	if (chip->main_step_fcc_count) {
-		main_fcc += (chip->fcc_step_size_ua * chip->main_step_fcc_dir);
+		main_fcc += (chip->chg_param->fcc_step_size_ua
+					* chip->main_step_fcc_dir);
 		chip->main_step_fcc_count--;
-		reschedule_ms = chip->fcc_step_delay_ms;
+		reschedule_ms = chip->chg_param->fcc_step_delay_ms;
 	} else if (chip->main_step_fcc_residual) {
 		main_fcc += chip->main_step_fcc_residual;
 		chip->main_step_fcc_residual = 0;
 	}
 
 	if (chip->parallel_step_fcc_count) {
-		parallel_fcc += (chip->fcc_step_size_ua *
-			chip->parallel_step_fcc_dir);
+		parallel_fcc += (chip->chg_param->fcc_step_size_ua
+					* chip->parallel_step_fcc_dir);
 		chip->parallel_step_fcc_count--;
-		reschedule_ms = chip->fcc_step_delay_ms;
+		reschedule_ms = chip->chg_param->fcc_step_delay_ms;
 	} else if (chip->parallel_step_fcc_residual) {
 		parallel_fcc += chip->parallel_step_fcc_residual;
 		chip->parallel_step_fcc_residual = 0;
@@ -1817,22 +1858,11 @@ static int pl_determine_initial_status(struct pl_data *chip)
 
 static void pl_config_init(struct pl_data *chip, int smb_version)
 {
-	chip->fcc_step_size_ua = DEFAULT_FCC_STEP_SIZE_UA;
-	chip->fcc_step_delay_ms = DEFAULT_FCC_STEP_UPDATE_DELAY_MS;
-
 	switch (smb_version) {
-	case PM8150B_SUBTYPE:
-		chip->fcc_step_delay_ms = 100;
-		break;
 	case PMI8998_SUBTYPE:
 	case PM660_SUBTYPE:
 		chip->wa_flags = AICL_RERUN_WA_BIT | FORCE_INOV_DISABLE_BIT;
 		break;
-	case PMI632_SUBTYPE:
-		break;
-	case PM7250B_SUBTYPE:
-		chip->fcc_step_delay_ms = 100;
-		break;
 	default:
 		break;
 	}
@@ -1857,11 +1887,16 @@ static void qcom_batt_create_debugfs(struct pl_data *chip)
 }
 
 #define DEFAULT_RESTRICTED_CURRENT_UA	1000000
-int qcom_batt_init(int smb_version)
+int qcom_batt_init(struct charger_param *chg_param)
 {
 	struct pl_data *chip;
 	int rc = 0;
 
+	if (!chg_param) {
+		pr_err("invalid charger parameter\n");
+		return -EINVAL;
+	}
+
 	/* initialize just once */
 	if (the_chip) {
 		pr_err("was initialized earlier. Failing now\n");
@@ -1875,7 +1910,8 @@ int qcom_batt_init(int smb_version)
 	qcom_batt_create_debugfs(chip);
 
 	chip->slave_pct = 50;
-	pl_config_init(chip, smb_version);
+	chip->chg_param = chg_param;
+	pl_config_init(chip, chg_param->smb_version);
 	chip->restricted_current = DEFAULT_RESTRICTED_CURRENT_UA;
 
 	chip->pl_ws = wakeup_source_register("qcom-battery");
diff --git a/drivers/power/supply/qcom/battery.h b/drivers/power/supply/qcom/battery.h
index 53a54e86..fbc6b25 100644
--- a/drivers/power/supply/qcom/battery.h
+++ b/drivers/power/supply/qcom/battery.h
@@ -1,10 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __BATTERY_H
 #define __BATTERY_H
-int qcom_batt_init(int smb_version);
+
+struct charger_param {
+	u32 fcc_step_delay_ms;
+	u32 fcc_step_size_ua;
+	u32 smb_version;
+	u32 hvdcp3_max_icl_ua;
+};
+
+int qcom_batt_init(struct charger_param *param);
 void qcom_batt_deinit(void);
 #endif /* __BATTERY_H */
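
Since qcom_batt_init() now receives the whole charger_param block rather than just the SMB subtype, the charger driver owns the FCC stepper and HVDCP3 ICL configuration. A hedged sketch of a caller follows; the literal values are placeholders — in this series they come from smb5_parse_dt_misc() and smb5_chg_config_init():

/*
 * Hedged sketch of the new init contract; the values below are
 * placeholders, the real ones are parsed from DT by the SMB5 driver.
 */
#include "battery.h"

static struct charger_param example_param = {
	.fcc_step_delay_ms  = 1000,     /* DEFAULT_FCC_STEP_UPDATE_DELAY_MS */
	.fcc_step_size_ua   = 100000,   /* DEFAULT_FCC_STEP_SIZE_UA */
	.smb_version        = 0,        /* e.g. PM8150B_SUBTYPE */
	.hvdcp3_max_icl_ua  = 3000000,  /* MICRO_3PA */
};

static int example_init(void)
{
	/* battery.c keeps the pointer, so the struct must outlive it. */
	return qcom_batt_init(&example_param);
}
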
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index bc52122..58a4eb9 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -1413,7 +1413,8 @@ static void ttf_work(struct work_struct *work)
 {
 	struct ttf *ttf = container_of(work,
 				struct ttf, ttf_work.work);
-	int rc, ibatt_now, vbatt_now, ttf_now, charge_status, ibatt_avg;
+	int rc, ibatt_now, vbatt_now, ttf_now, charge_status, ibatt_avg,
+		msoc = 0, charge_done;
 	ktime_t ktime_now;
 
 	mutex_lock(&ttf->lock);
@@ -1422,8 +1423,25 @@ static void ttf_work(struct work_struct *work)
 		pr_err("failed to get charge_status rc=%d\n", rc);
 		goto end_work;
 	}
-	if (charge_status != POWER_SUPPLY_STATUS_CHARGING &&
-			charge_status != POWER_SUPPLY_STATUS_DISCHARGING)
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_DONE, &charge_done);
+	if (rc < 0) {
+		pr_err("failed to get charge_done rc=%d\n", rc);
+		goto end_work;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc);
+	if (rc < 0) {
+		pr_err("failed to get msoc, rc=%d\n", rc);
+		goto end_work;
+	}
+	pr_debug("TTF: charge_status:%d charge_done:%d msoc:%d\n",
+			charge_status, charge_done, msoc);
+	/*
+	 * Skip the TTF calculation when SOC is 100% or charging
+	 * has terminated.
+	 */
+	if ((msoc == 100) || charge_done)
 		goto end_work;
 
 	rc =  ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_now);
diff --git a/drivers/power/supply/qcom/fg-alg.h b/drivers/power/supply/qcom/fg-alg.h
index a21eaf3..34442c7 100644
--- a/drivers/power/supply/qcom/fg-alg.h
+++ b/drivers/power/supply/qcom/fg-alg.h
@@ -88,6 +88,7 @@ enum ttf_param {
 	TTF_CHG_TYPE,
 	TTF_CHG_STATUS,
 	TTF_TTE_VALID,
+	TTF_CHG_DONE,
 };
 
 struct ttf_circ_buf {
diff --git a/drivers/power/supply/qcom/hl6111r.c b/drivers/power/supply/qcom/hl6111r.c
index 1f2b132..2836da4 100644
--- a/drivers/power/supply/qcom/hl6111r.c
+++ b/drivers/power/supply/qcom/hl6111r.c
@@ -393,6 +393,7 @@ static enum power_supply_property hl6111r_psy_props[] = {
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
 	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_STEP,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
 	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
@@ -433,6 +434,13 @@ static int hl6111r_get_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CURRENT_AVG:
 		rc = hl6111r_get_current_avg(chip, val);
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_STEP:
+		/*
+		 * Using only 20 mV for now, to correspond to range 0.
+		 * Return value in uV.
+		 */
+		*val = (hl6111r_vout_range[0].step_mv * 1000);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
 		*val = HL6111R_MAX_VOLTAGE_UV;
 		break;
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index 69fff08..b670537 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -473,7 +473,7 @@ int qg_batterydata_init(struct device_node *profile_node)
 		if (IS_ERR_OR_NULL(battery->battery_device)) {
 			pr_err("Failed to create battery_device device\n");
 			rc = -ENODEV;
-			goto delete_cdev;
+			goto destroy_class;
 		}
 		the_battery = battery;
 	}
@@ -492,6 +492,8 @@ int qg_batterydata_init(struct device_node *profile_node)
 
 destroy_device:
 	device_destroy(battery->battery_class, battery->dev_no);
+destroy_class:
+	class_destroy(battery->battery_class);
 delete_cdev:
 	cdev_del(&battery->battery_cdev);
 unregister_chrdev:
@@ -523,6 +525,7 @@ void qg_batterydata_exit(void)
 	if (the_battery) {
 		/* unregister the device node */
 		device_destroy(the_battery->battery_class, the_battery->dev_no);
+		class_destroy(the_battery->battery_class);
 		cdev_del(&the_battery->battery_cdev);
 		unregister_chrdev_region(the_battery->dev_no, 1);
 		qg_battery_profile_free();
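
The new destroy_class label completes the unwind chain: resources are released in the reverse order of creation, so a device_create() failure now tears down the class as well, and qg_batterydata_exit() mirrors the same order. A generic, hedged sketch of the pattern (the example_* identifiers are illustrative, not this driver's):

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/module.h>

struct example {
	dev_t devno;
	struct cdev cdev;
	struct class *class;
	struct device *dev;
};

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static int example_init(struct example *ex)
{
	int rc;

	rc = alloc_chrdev_region(&ex->devno, 0, 1, "example");
	if (rc < 0)
		return rc;

	cdev_init(&ex->cdev, &example_fops);
	rc = cdev_add(&ex->cdev, ex->devno, 1);
	if (rc)
		goto unregister_chrdev;

	ex->class = class_create(THIS_MODULE, "example");
	if (IS_ERR(ex->class)) {
		rc = PTR_ERR(ex->class);
		goto delete_cdev;
	}

	ex->dev = device_create(ex->class, NULL, ex->devno, NULL, "example");
	if (IS_ERR(ex->dev)) {
		rc = PTR_ERR(ex->dev);
		goto destroy_class;	/* the label this fix adds */
	}

	return 0;

destroy_class:
	class_destroy(ex->class);
delete_cdev:
	cdev_del(&ex->cdev);
unregister_chrdev:
	unregister_chrdev_region(ex->devno, 1);
	return rc;
}
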
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index 1ea05e7..5197e4f 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -1273,6 +1273,9 @@ static int fg_gen4_get_ttf_param(void *data, enum ttf_param param, int *val)
 	case TTF_CHG_STATUS:
 		*val = fg->charge_status;
 		break;
+	case TTF_CHG_DONE:
+		*val = fg->charge_done;
+		break;
 	default:
 		pr_err_ratelimited("Unsupported parameter %d\n", param);
 		rc = -EINVAL;
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 08b123b..ddf0d88 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -1880,6 +1880,9 @@ static int qg_get_ttf_param(void *data, enum ttf_param param, int *val)
 	case TTF_CHG_STATUS:
 		*val = chip->charge_status;
 		break;
+	case TTF_CHG_DONE:
+		*val = chip->charge_done;
+		break;
 	default:
 		pr_err("Unsupported property %d\n", param);
 		rc = -EINVAL;
diff --git a/drivers/power/supply/qcom/qpnp-qnovo5.c b/drivers/power/supply/qcom/qpnp-qnovo5.c
index 8c30680..e2a5a38 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo5.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo5.c
@@ -668,9 +668,17 @@ static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
 			char *buf)
 {
 	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
-	int val = get_effective_result(chip->not_ok_to_qnovo_votable);
+	int val, cp_dis, not_ok =
+		get_effective_result(chip->not_ok_to_qnovo_votable);
+	struct votable *cp_disable_votable = find_votable("CP_DISABLE");
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", !val);
+	val = !not_ok;
+	if (cp_disable_votable) {
+		cp_dis = get_effective_result(cp_disable_votable);
+		val = val && cp_dis;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 static CLASS_ATTR_RO(ok_to_qnovo);
 
@@ -732,9 +740,9 @@ static ssize_t standalone_show(struct class *c, struct class_attribute *attr,
 	if (!cp_disable_votable)
 		return -ENODEV;
 
-	val = get_effective_result(cp_disable_votable);
+	val = get_client_vote(cp_disable_votable, QNOVO_VOTER);
 
-	return scnprintf(ubuf, PAGE_SIZE, "%d\n", !val);
+	return scnprintf(ubuf, PAGE_SIZE, "%d\n", val);
 }
 
 static ssize_t standalone_store(struct class *c, struct class_attribute *attr,
@@ -1166,6 +1174,7 @@ static void status_change_work(struct work_struct *work)
 	union power_supply_propval pval;
 	bool usb_present = false, hw_ok_to_qnovo = false;
 	int rc, battery_health, charge_status;
+	struct votable *cp_disable_votable = find_votable("CP_DISABLE");
 
 	if (is_usb_available(chip)) {
 		rc = power_supply_get_property(chip->usb_psy,
@@ -1179,6 +1188,9 @@ static void status_change_work(struct work_struct *work)
 		cancel_delayed_work_sync(&chip->usb_debounce_work);
 		vote(chip->awake_votable, USB_READY_VOTER, false, 0);
 		vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+		if (cp_disable_votable)
+			vote(cp_disable_votable, QNOVO_VOTER, false, 0);
+
 		if (chip->pinctrl) {
 			rc = pinctrl_select_state(chip->pinctrl,
 					chip->pinctrl_state1);
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 622ab11..160ea6d 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -325,20 +325,20 @@ static int smb5_chg_config_init(struct smb5 *chip)
 
 	switch (pmic_rev_id->pmic_subtype) {
 	case PM8150B_SUBTYPE:
-		chip->chg.smb_version = PM8150B_SUBTYPE;
+		chip->chg.chg_param.smb_version = PM8150B_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm8150b_charger";
 		chg->wa_flags |= CHG_TERMINATION_WA;
 		break;
 	case PM7250B_SUBTYPE:
-		chip->chg.smb_version = PM7250B_SUBTYPE;
+		chip->chg.chg_param.smb_version = PM7250B_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm7250b_charger";
 		chg->wa_flags |= CHG_TERMINATION_WA;
 		chg->uusb_moisture_protection_capable = true;
 		break;
 	case PM6150_SUBTYPE:
-		chip->chg.smb_version = PM6150_SUBTYPE;
+		chip->chg.chg_param.smb_version = PM6150_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm6150_charger";
 		chg->wa_flags |= SW_THERM_REGULATION_WA | CHG_TERMINATION_WA;
@@ -347,9 +347,10 @@ static int smb5_chg_config_init(struct smb5 *chip)
 		chg->main_fcc_max = PM6150_MAX_FCC_UA;
 		break;
 	case PMI632_SUBTYPE:
-		chip->chg.smb_version = PMI632_SUBTYPE;
+		chip->chg.chg_param.smb_version = PMI632_SUBTYPE;
 		chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA
-				| CHG_TERMINATION_WA | USBIN_ADC_WA;
+				| CHG_TERMINATION_WA | USBIN_ADC_WA
+				| SKIP_MISC_PBS_IRQ_WA;
 		chg->param = smb5_pmi632_params;
 		chg->use_extcon = true;
 		chg->name = "pmi632_charger";
@@ -434,6 +435,8 @@ static int smb5_configure_internal_pull(struct smb_charger *chg, int type,
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
 #define DEFAULT_WD_BARK_TIME		64
 #define DEFAULT_WD_SNARL_TIME_8S	0x07
+#define DEFAULT_FCC_STEP_SIZE_UA	100000
+#define DEFAULT_FCC_STEP_UPDATE_DELAY_MS	1000
 static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
 {
 	int rc = 0, byte_len;
@@ -570,6 +573,29 @@ static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
 	chip->dt.adc_based_aicl = of_property_read_bool(node,
 					"qcom,adc-based-aicl");
 
+	of_property_read_u32(node, "qcom,fcc-step-delay-ms",
+					&chg->chg_param.fcc_step_delay_ms);
+	if (chg->chg_param.fcc_step_delay_ms <= 0)
+		chg->chg_param.fcc_step_delay_ms =
+					DEFAULT_FCC_STEP_UPDATE_DELAY_MS;
+
+	of_property_read_u32(node, "qcom,fcc-step-size-ua",
+					&chg->chg_param.fcc_step_size_ua);
+	if (chg->chg_param.fcc_step_size_ua <= 0)
+		chg->chg_param.fcc_step_size_ua = DEFAULT_FCC_STEP_SIZE_UA;
+
+	/*
+	 * If this property is present, parallel charging with the CP is
+	 * disabled for HVDCP3 adapters.
+	 */
+	chg->hvdcp3_standalone_config = of_property_read_bool(node,
+					"qcom,hvdcp3-standalone-config");
+
+	of_property_read_u32(node, "qcom,hvdcp3-max-icl-ua",
+					&chg->chg_param.hvdcp3_max_icl_ua);
+	if (chg->chg_param.hvdcp3_max_icl_ua <= 0)
+		chg->chg_param.hvdcp3_max_icl_ua = MICRO_3PA;
+
 	return 0;
 }
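
of_property_read_u32() leaves its output untouched when the property is missing, so the parsing above relies on chg_param being zero-initialized and then replaces zero with the built-in defaults (the fields are u32, so the `<= 0` comparisons reduce to zero checks). A hedged helper expressing the same pattern — the helper name is hypothetical, it is not part of the patch:

#include <linux/of.h>
#include <linux/types.h>

/* Hypothetical helper, shown only to illustrate the fallback pattern. */
static void example_read_u32_default(const struct device_node *node,
				     const char *prop, u32 *out, u32 def)
{
	/* of_property_read_u32() leaves *out alone on failure. */
	if (of_property_read_u32(node, prop, out) || !*out)
		*out = def;
}

Usage would look like example_read_u32_default(node, "qcom,fcc-step-size-ua", &chg->chg_param.fcc_step_size_ua, DEFAULT_FCC_STEP_SIZE_UA).
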
 
@@ -642,7 +668,8 @@ static int smb5_parse_dt_currents(struct smb5 *chip, struct device_node *node)
 	rc = of_property_read_u32(node,
 				"qcom,otg-cl-ua", &chg->otg_cl_ua);
 	if (rc < 0)
-		chg->otg_cl_ua = (chip->chg.smb_version == PMI632_SUBTYPE) ?
+		chg->otg_cl_ua =
+			(chip->chg.chg_param.smb_version == PMI632_SUBTYPE) ?
 							MICRO_1PA : MICRO_3PA;
 
 	rc = of_property_read_u32(node, "qcom,chg-term-src",
@@ -1278,20 +1305,26 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
 	struct smb5 *chip = power_supply_get_drvdata(psy);
 	struct smb_charger *chg = &chip->chg;
 	union power_supply_propval pval = {0, };
-	int rc = 0;
+	int rc = 0, offset_ua = 0;
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
-		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		/* Adjust Main FCC for QC3.0 + SMB1390 */
+		rc = smblib_get_qc3_main_icl_offset(chg, &offset_ua);
+		if (rc < 0)
+			offset_ua = 0;
+
+		rc = smblib_set_charge_param(chg, &chg->param.fcc,
+						val->intval + offset_ua);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
 		rc = smblib_set_icl_current(chg, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_FLASH_ACTIVE:
-		if ((chg->smb_version == PMI632_SUBTYPE)
+		if ((chg->chg_param.smb_version == PMI632_SUBTYPE)
 				&& (chg->flash_active != val->intval)) {
 			chg->flash_active = val->intval;
 
@@ -1337,6 +1370,9 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_FORCE_MAIN_ICL:
 		vote_override(chg->usb_icl_votable, CC_MODE_VOTER,
 				(val->intval < 0) ? false : true, val->intval);
+		/* Main ICL updated re-calculate ILIM */
+		if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3)
+			rerun_election(chg->fcc_votable);
 		break;
 	case POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL:
 		rc = smb5_set_prop_comp_clamp_level(chg, val);
@@ -1499,6 +1535,7 @@ static int smb5_dc_prop_is_writeable(struct power_supply *psy,
 {
 	switch (psp) {
 	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
 		return 1;
 	default:
 		break;
@@ -2055,7 +2092,7 @@ static int smb5_configure_typec(struct smb_charger *chg)
 		return rc;
 	}
 
-	if (chg->smb_version != PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version != PMI632_SUBTYPE) {
 		rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
 				USBIN_IN_COLLAPSE_GF_SEL_MASK |
 				USBIN_AICL_STEP_TIMING_SEL_MASK,
@@ -2116,9 +2153,10 @@ static int smb5_configure_micro_usb(struct smb_charger *chg)
 		}
 
 		/* Disable periodic monitoring of CC_ID pin */
-		rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
-			PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
-			TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
+		rc = smblib_write(chg,
+			((chg->chg_param.smb_version == PMI632_SUBTYPE) ?
+				PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+				TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't disable periodic monitoring of CC_ID rc=%d\n",
 				rc);
@@ -2142,7 +2180,7 @@ static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip)
 	s16 raw_hi_thresh, raw_lo_thresh, max_limit_ma;
 	struct smb_charger *chg = &chip->chg;
 
-	if (chip->chg.smb_version == PMI632_SUBTYPE)
+	if (chip->chg.chg_param.smb_version == PMI632_SUBTYPE)
 		max_limit_ma = ITERM_LIMITS_PMI632_MA;
 	else
 		max_limit_ma = ITERM_LIMITS_PM8150B_MA;
@@ -2203,7 +2241,7 @@ static int smb5_configure_iterm_thresholds(struct smb5 *chip)
 
 	switch (chip->dt.term_current_src) {
 	case ITERM_SRC_ADC:
-		if (chip->chg.smb_version == PM8150B_SUBTYPE) {
+		if (chip->chg.chg_param.smb_version == PM8150B_SUBTYPE) {
 			rc = smblib_masked_write(chg, CHGR_ADC_TERM_CFG_REG,
 					TERM_BASED_ON_SYNC_CONV_OR_SAMPLE_CNT,
 					TERM_BASED_ON_SAMPLE_CNT);
@@ -2275,7 +2313,7 @@ static int smb5_init_dc_peripheral(struct smb_charger *chg)
 	int rc = 0;
 
 	/* PMI632 does not have DC peripheral */
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		return 0;
 
 	/* Set DCIN ICL to 100 mA */
@@ -2413,7 +2451,7 @@ static int smb5_init_connector_type(struct smb_charger *chg)
 	 * PMI632 can have the connector type defined by a dedicated register
 	 * PMI632_TYPEC_MICRO_USB_MODE_REG or by a common TYPEC_U_USB_CFG_REG.
 	 */
-	if (chg->smb_version == PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE) {
 		rc = smblib_read(chg, PMI632_TYPEC_MICRO_USB_MODE_REG, &val);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't read USB mode rc=%d\n", rc);
@@ -2458,7 +2496,7 @@ static int smb5_init_connector_type(struct smb_charger *chg)
 	 *   boots with charger connected.
 	 * - Initialize flash module for PMI632
 	 */
-	if (chg->smb_version == PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE) {
 		schgm_flash_init(chg);
 		smblib_rerun_apsd_if_required(chg);
 	}
@@ -3416,7 +3454,7 @@ static int smb5_probe(struct platform_device *pdev)
 		}
 	}
 
-	switch (chg->smb_version) {
+	switch (chg->chg_param.smb_version) {
 	case PM8150B_SUBTYPE:
 	case PM6150_SUBTYPE:
 	case PM7250B_SUBTYPE:
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 62e57da..7e0a399 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -1096,6 +1096,12 @@ static void smb1390_status_change_work(struct work_struct *work)
 			goto out;
 		}
 
+		/*
+		 * Slave SMB1390 is not required for the power-rating of QC3
+		 */
+		if (pval.intval != POWER_SUPPLY_CP_HVDCP3)
+			vote(chip->slave_disable_votable, SRC_VOTER, false, 0);
+
 		/* Check for SOC threshold only once before enabling CP */
 		vote(chip->disable_votable, SRC_VOTER, false, 0);
 		if (!chip->batt_soc_validated) {
@@ -1163,6 +1169,7 @@ static void smb1390_status_change_work(struct work_struct *work)
 		}
 	} else {
 		chip->batt_soc_validated = false;
+		vote(chip->slave_disable_votable, SRC_VOTER, true, 0);
 		vote(chip->disable_votable, SRC_VOTER, true, 0);
 		vote(chip->disable_votable, TAPER_END_VOTER, false, 0);
 		vote(chip->fcc_votable, CP_VOTER, false, 0);
@@ -1578,6 +1585,8 @@ static int smb1390_create_votables(struct smb1390 *chip)
 	if (IS_ERR(chip->slave_disable_votable))
 		return PTR_ERR(chip->slave_disable_votable);
 
+	/* Keep slave SMB disabled */
+	vote(chip->slave_disable_votable, SRC_VOTER, true, 0);
 	/*
 	 * charge pump is initially disabled; this indirectly votes to allow
 	 * traditional parallel charging if present
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index de55e38..d34b9ff 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -16,7 +16,6 @@
 #include <linux/ktime.h>
 #include "smb5-lib.h"
 #include "smb5-reg.h"
-#include "battery.h"
 #include "schgm-flash.h"
 #include "step-chg-jeita.h"
 #include "storm-watch.h"
@@ -756,7 +755,7 @@ static int smblib_usb_pd_adapter_allowance_override(struct smb_charger *chg,
 {
 	int rc = 0;
 
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		return 0;
 
 	rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_OVERRIDE_REG,
@@ -803,7 +802,7 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
 	int rc, aicl_threshold;
 	u8 vbus_allowance;
 
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		return 0;
 
 	if (chg->pd_active == POWER_SUPPLY_PD_PPS_ACTIVE) {
@@ -873,6 +872,37 @@ int smblib_set_aicl_cont_threshold(struct smb_chg_param *param,
 /********************
  * HELPER FUNCTIONS *
  ********************/
+static bool is_cp_available(struct smb_charger *chg)
+{
+	if (!chg->cp_psy)
+		chg->cp_psy = power_supply_get_by_name("charge_pump_master");
+
+	return !!chg->cp_psy;
+}
+
+#define CP_TO_MAIN_ICL_OFFSET_PC		10
+int smblib_get_qc3_main_icl_offset(struct smb_charger *chg, int *offset_ua)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if ((chg->real_charger_type != POWER_SUPPLY_TYPE_USB_HVDCP_3)
+		|| chg->hvdcp3_standalone_config || !is_cp_available(chg)) {
+		*offset_ua = 0;
+		return 0;
+	}
+
+	rc = power_supply_get_property(chg->cp_psy, POWER_SUPPLY_PROP_CP_ILIM,
+					&pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get CP ILIM rc=%d\n", rc);
+		return rc;
+	}
+
+	*offset_ua = (pval.intval * CP_TO_MAIN_ICL_OFFSET_PC * 2) / 100;
+
+	return 0;
+}
 
 int smblib_get_prop_from_bms(struct smb_charger *chg,
 				enum power_supply_property psp,
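
The offset handed back to the main charger is 10% of twice the charge pump's input limit; the factor of two presumably reflects the CP's 2:1 current multiplication at its output. Worked numbers with an assumed 2 A CP ILIM (not a value from the patch):

/* Worked example only; the 2 A CP ILIM is an assumed value. */
#include <stdio.h>

#define CP_TO_MAIN_ICL_OFFSET_PC	10

int main(void)
{
	int cp_ilim_ua = 2000000;	/* example: CP input limit of 2 A */
	int offset_ua = (cp_ilim_ua * CP_TO_MAIN_ICL_OFFSET_PC * 2) / 100;

	/* 2,000,000 * 10 * 2 / 100 = 400,000 uA added to the main FCC */
	printf("main FCC offset = %d uA\n", offset_ua);
	return 0;
}
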
@@ -1052,6 +1082,11 @@ static int smblib_notifier_call(struct notifier_block *nb,
 		schedule_work(&chg->pl_update_work);
 	}
 
+	if (!strcmp(psy->desc->name, "charge_pump_master")) {
+		pm_stay_awake(chg->dev);
+		schedule_work(&chg->cp_status_change_work);
+	}
+
 	return NOTIFY_OK;
 }
 
@@ -1417,6 +1452,11 @@ int smblib_get_irq_status(struct smb_charger *chg,
 	int rc;
 	u8 reg;
 
+	if (chg->wa_flags & SKIP_MISC_PBS_IRQ_WA) {
+		val->intval = 0;
+		return 0;
+	}
+
 	mutex_lock(&chg->irq_status_lock);
 	/* Report and clear cached status */
 	val->intval = chg->irq_status;
@@ -1474,8 +1514,8 @@ static int smblib_set_moisture_protection(struct smb_charger *chg,
 
 		/* Set 1% duty cycle on ID detection */
 		rc = smblib_masked_write(chg,
-				((chg->smb_version == PMI632_SUBTYPE) ?
-				PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+				((chg->chg_param.smb_version == PMI632_SUBTYPE)
+				? PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
 				TYPEC_U_USB_WATER_PROTECTION_CFG_REG),
 				EN_MICRO_USB_WATER_PROTECTION_BIT |
 				MICRO_USB_DETECTION_ON_TIME_CFG_MASK |
@@ -1507,8 +1547,9 @@ static int smblib_set_moisture_protection(struct smb_charger *chg,
 		}
 
 		/* Disable periodic monitoring of CC_ID pin */
-		rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
-				PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+		rc = smblib_write(chg,
+				((chg->chg_param.smb_version == PMI632_SUBTYPE)
+				? PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
 				TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't disable 1 percent CC_ID duty cycle rc=%d\n",
@@ -1556,7 +1597,7 @@ static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
 {
 	struct smb_charger *chg = data;
 
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		return 0;
 
 	/* resume input if suspend is invalid */
@@ -2156,7 +2197,7 @@ int smblib_get_prop_batt_iterm(struct smb_charger *chg,
 	temp = buf[1] | (buf[0] << 8);
 	temp = sign_extend32(temp, 15);
 
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		temp = DIV_ROUND_CLOSEST(temp * ITERM_LIMITS_PMI632_MA,
 					ADC_CHG_ITERM_MASK);
 	else
@@ -2675,10 +2716,7 @@ static int smblib_update_thermal_readings(struct smb_charger *chg)
 	}
 
 	if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) {
-		if (!chg->cp_psy)
-			chg->cp_psy =
-				power_supply_get_by_name("charge_pump_master");
-		if (chg->cp_psy) {
+		if (is_cp_available(chg)) {
 			rc = power_supply_get_property(chg->cp_psy,
 				POWER_SUPPLY_PROP_CP_DIE_TEMP, &pval);
 			if (rc < 0) {
@@ -2895,7 +2933,7 @@ int smblib_get_prop_dc_present(struct smb_charger *chg,
 	int rc;
 	u8 stat;
 
-	if (chg->smb_version == PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE) {
 		val->intval = 0;
 		return 0;
 	}
@@ -2916,7 +2954,7 @@ int smblib_get_prop_dc_online(struct smb_charger *chg,
 	int rc = 0;
 	u8 stat;
 
-	if (chg->smb_version == PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE) {
 		val->intval = 0;
 		return 0;
 	}
@@ -3169,7 +3207,7 @@ int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
 		/* else, fallthrough */
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 	case POWER_SUPPLY_TYPE_USB_PD:
-		if (chg->smb_version == PMI632_SUBTYPE)
+		if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 			val->intval = MICRO_9V;
 		else
 			val->intval = MICRO_12V;
@@ -3197,7 +3235,7 @@ int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
 		}
 		/* else, fallthrough */
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
-		if (chg->smb_version == PMI632_SUBTYPE)
+		if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 			val->intval = MICRO_9V;
 		else
 			val->intval = MICRO_12V;
@@ -3319,7 +3357,7 @@ int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
 	 * to occur randomly in the USBIN channel, particularly at high
 	 * voltages.
 	 */
-	if (chg->smb_version == PM8150B_SUBTYPE && pval.intval)
+	if (chg->chg_param.smb_version == PM8150B_SUBTYPE && pval.intval)
 		rc = smblib_read_mid_voltage_chan(chg, val);
 	else
 		rc = smblib_read_usbin_voltage_chan(chg, val);
@@ -3689,7 +3727,7 @@ int smblib_get_prop_usb_current_now(struct smb_charger *chg,
 		 * For PMI632, scaling factor = reciprocal of
 		 * 0.4V/A in Buck mode, 0.8V/A in Boost mode.
 		 */
-		switch (chg->smb_version) {
+		switch (chg->chg_param.smb_version) {
 		case PMI632_SUBTYPE:
 			buck_scale = 40;
 			boost_scale = 80;
@@ -4010,7 +4048,7 @@ int smblib_get_prop_connector_health(struct smb_charger *chg)
 	 * In PM8150B, SKIN channel measures Wireless charger receiver
 	 * temp, used to regulate DC ICL.
 	 */
-	if (chg->smb_version == PM8150B_SUBTYPE && dc_present)
+	if (chg->chg_param.smb_version == PM8150B_SUBTYPE && dc_present)
 		return smblib_get_skin_temp_status(chg);
 
 	return POWER_SUPPLY_HEALTH_COOL;
@@ -4979,7 +5017,7 @@ irqreturn_t usbin_uv_irq_handler(int irq, void *data)
 
 unsuspend_input:
 		/* Force torch in boost mode to ensure it works with low ICL */
-		if (chg->smb_version == PMI632_SUBTYPE)
+		if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 			schgm_flash_torch_priority(chg, TORCH_BOOST_MODE);
 
 		if (chg->aicl_max_reached) {
@@ -5243,7 +5281,7 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
 					chg->aicl_cont_threshold_mv);
 			chg->aicl_max_reached = false;
 
-			if (chg->smb_version == PMI632_SUBTYPE)
+			if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 				schgm_flash_torch_priority(chg,
 						TORCH_BUCK_MODE);
 
@@ -6206,12 +6244,8 @@ static bool is_cp_topo_vbatt(struct smb_charger *chg)
 	bool is_vbatt;
 	union power_supply_propval pval;
 
-	if (!chg->cp_psy) {
-		chg->cp_psy = power_supply_get_by_name("charge_pump_master");
-
-		if (!chg->cp_psy)
-			return false;
-	}
+	if (!is_cp_available(chg))
+		return false;
 
 	rc = power_supply_get_property(chg->cp_psy,
 				POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, &pval);
@@ -6820,7 +6854,7 @@ static void smblib_moisture_protection_work(struct work_struct *work)
 	 * Disable 1% duty cycle on CC_ID pin and enable uUSB factory mode
 	 * detection to track any change on RID, as interrupts are disable.
 	 */
-	rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
+	rc = smblib_write(chg, ((chg->chg_param.smb_version == PMI632_SUBTYPE) ?
 			PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
 			TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
 	if (rc < 0) {
@@ -7232,6 +7266,38 @@ static void smblib_lpd_detach_work(struct work_struct *work)
 		chg->lpd_stage = LPD_STAGE_NONE;
 }
 
+static void smblib_cp_status_change_work(struct work_struct *work)
+{
+	int rc;
+	union power_supply_propval pval;
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+			cp_status_change_work);
+
+	if (!chg->cp_psy)
+		chg->cp_psy = power_supply_get_by_name("charge_pump_master");
+
+	if (!chg->cp_psy)
+		goto relax;
+
+	if (chg->cp_topo == -EINVAL) {
+		rc = power_supply_get_property(chg->cp_psy,
+				POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, &pval);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read cp topo rc=%d\n", rc);
+			goto relax;
+		}
+
+		chg->cp_topo = pval.intval;
+
+		if (chg->cp_topo == POWER_SUPPLY_PL_OUTPUT_VBAT &&
+				chg->cp_reason == POWER_SUPPLY_CP_WIRELESS)
+			vote(chg->fcc_main_votable, WLS_PL_CHARGING_VOTER, true,
+					800000);
+	}
+relax:
+	pm_relax(chg->dev);
+}
+
 static int smblib_create_votables(struct smb_charger *chg)
 {
 	int rc = 0;
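
The notifier marks the device's wakeup source active with pm_stay_awake() before scheduling cp_status_change_work, and every exit of the work funnels through the relax label so the wakeup source is always released. A hedged, generic sketch of that pairing (the example_* names are illustrative):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative pattern only; the identifiers are not from the patch. */
struct example_chg {
	struct device *dev;
	struct work_struct status_work;
};

static void example_status_work(struct work_struct *work)
{
	struct example_chg *chg = container_of(work, struct example_chg,
					       status_work);

	/* ... query the supply, cache the topology, vote, etc. ... */

	/* Every path ends here so the wakeup source is released. */
	pm_relax(chg->dev);
}

static int example_notifier(struct example_chg *chg)
{
	/* Keep the device's wakeup source active until the work runs. */
	pm_stay_awake(chg->dev);
	schedule_work(&chg->status_work);
	return 0;
}
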
@@ -7408,6 +7474,7 @@ int smblib_init(struct smb_charger *chg)
 	INIT_WORK(&chg->pl_update_work, pl_update_work);
 	INIT_WORK(&chg->jeita_update_work, jeita_update_work);
 	INIT_WORK(&chg->dcin_aicl_work, dcin_aicl_work);
+	INIT_WORK(&chg->cp_status_change_work, smblib_cp_status_change_work);
 	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
 	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
 	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
@@ -7466,10 +7533,11 @@ int smblib_init(struct smb_charger *chg)
 	chg->cp_reason = POWER_SUPPLY_CP_NONE;
 	chg->thermal_status = TEMP_BELOW_RANGE;
 	chg->typec_irq_en = true;
+	chg->cp_topo = -EINVAL;
 
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
-		rc = qcom_batt_init(chg->smb_version);
+		rc = qcom_batt_init(&chg->chg_param);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't init qcom_batt_init rc=%d\n",
 				rc);
@@ -7562,6 +7630,7 @@ int smblib_deinit(struct smb_charger *chg)
 		cancel_work_sync(&chg->jeita_update_work);
 		cancel_work_sync(&chg->pl_update_work);
 		cancel_work_sync(&chg->dcin_aicl_work);
+		cancel_work_sync(&chg->cp_status_change_work);
 		cancel_delayed_work_sync(&chg->clear_hdc_work);
 		cancel_delayed_work_sync(&chg->icl_change_work);
 		cancel_delayed_work_sync(&chg->pl_enable_work);
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index d8ed55a..306dc8c 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -14,6 +14,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/extcon-provider.h>
 #include "storm-watch.h"
+#include "battery.h"
 
 enum print_reason {
 	PR_INTERRUPT	= BIT(0),
@@ -124,6 +125,7 @@ enum {
 	USBIN_OV_WA			= BIT(3),
 	CHG_TERMINATION_WA		= BIT(4),
 	USBIN_ADC_WA			= BIT(5),
+	SKIP_MISC_PBS_IRQ_WA		= BIT(6),
 };
 
 enum jeita_cfg_stat {
@@ -376,7 +378,6 @@ struct smb_charger {
 	int			pd_disabled;
 	enum smb_mode		mode;
 	struct smb_chg_freq	chg_freq;
-	int			smb_version;
 	int			otg_delay_ms;
 	int			weak_chg_icl_ua;
 	bool			pd_not_supported;
@@ -439,6 +440,7 @@ struct smb_charger {
 	struct work_struct	moisture_protection_work;
 	struct work_struct	chg_termination_work;
 	struct work_struct	dcin_aicl_work;
+	struct work_struct	cp_status_change_work;
 	struct delayed_work	ps_change_timeout_work;
 	struct delayed_work	clear_hdc_work;
 	struct delayed_work	icl_change_work;
@@ -457,11 +459,13 @@ struct smb_charger {
 	struct alarm		chg_termination_alarm;
 	struct alarm		dcin_aicl_alarm;
 
+	struct charger_param	chg_param;
 	/* secondary charger config */
 	bool			sec_pl_present;
 	bool			sec_cp_present;
 	int			sec_chg_selected;
 	int			cp_reason;
+	int			cp_topo;
 
 	/* pd */
 	int			voltage_min_uv;
@@ -549,6 +553,7 @@ struct smb_charger {
 	u32			comp_clamp_level;
 	int			wls_icl_ua;
 	bool			dcin_aicl_done;
+	bool			hvdcp3_standalone_config;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -795,6 +800,7 @@ void smblib_apsd_enable(struct smb_charger *chg, bool enable);
 int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val);
 int smblib_get_irq_status(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_qc3_main_icl_offset(struct smb_charger *chg, int *offset_ua);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 8ba6abf..3958ee0 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -323,17 +323,22 @@ static int sbs_get_battery_presence_and_health(
 {
 	int ret;
 
-	if (psp == POWER_SUPPLY_PROP_PRESENT) {
-		/* Dummy command; if it succeeds, battery is present. */
-		ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
-		if (ret < 0)
-			val->intval = 0; /* battery disconnected */
-		else
-			val->intval = 1; /* battery present */
-	} else { /* POWER_SUPPLY_PROP_HEALTH */
+	/* Dummy command; if it succeeds, battery is present. */
+	ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
+
+	if (ret < 0) { /* battery not present */
+		if (psp == POWER_SUPPLY_PROP_PRESENT) {
+			val->intval = 0;
+			return 0;
+		}
+		return ret;
+	}
+
+	if (psp == POWER_SUPPLY_PROP_PRESENT)
+		val->intval = 1; /* battery present */
+	else /* POWER_SUPPLY_PROP_HEALTH */
 		/* SBS spec doesn't have a general health command. */
 		val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
-	}
 
 	return 0;
 }
@@ -629,12 +634,14 @@ static int sbs_get_property(struct power_supply *psy,
 	switch (psp) {
 	case POWER_SUPPLY_PROP_PRESENT:
 	case POWER_SUPPLY_PROP_HEALTH:
-		if (client->flags & SBS_FLAGS_TI_BQ20Z75)
+		if (chip->flags & SBS_FLAGS_TI_BQ20Z75)
 			ret = sbs_get_ti_battery_presence_and_health(client,
 								     psp, val);
 		else
 			ret = sbs_get_battery_presence_and_health(client, psp,
 								  val);
+
+		/* this can only be true if no gpio is used */
 		if (psp == POWER_SUPPLY_PROP_PRESENT)
 			return 0;
 		break;
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 0059b24c..28e1f64 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -58,6 +58,12 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 	/* Calculate the period and prescaler value */
 	div = (unsigned long long)clk_get_rate(priv->clk) * state->period;
 	do_div(div, NSEC_PER_SEC);
+	if (!div) {
+		/* Clock is too slow to achieve requested period. */
+		dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period);
+		return -EINVAL;
+	}
+
 	prd = div;
 	while (div > STM32_LPTIM_MAX_ARR) {
 		presc++;
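
The guard rejects periods the LPTIM clock cannot resolve: div is the requested period expressed in timer ticks, and it truncates to zero when the period is shorter than one clock cycle. Worked numbers with assumed inputs (a 32768 Hz clock and a 10 us request):

/* Worked example only; the clock rate and period are assumed values. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t clk_rate = 32768;	/* e.g. LPTIM fed from a 32 kHz LSE */
	uint64_t period_ns = 10000;	/* requested 10 us period */
	uint64_t div = clk_rate * period_ns / NSEC_PER_SEC;

	/* 32768 * 10000 / 1e9 = 0: less than one timer tick, so -EINVAL. */
	printf("div = %llu -> %s\n", (unsigned long long)div,
	       div ? "ok" : "reject (-EINVAL)");
	return 0;
}
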
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 93b2862..674d848 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
 		goto error;
 	}
 	/* Check for trailing stuff. */
-	if (i == num_devices && strlen(buf) > 0) {
+	if (i == num_devices && buf && strlen(buf) > 0) {
 		rc = -EINVAL;
 		goto error;
 	}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aea5029..df09ed5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1213,6 +1213,8 @@ device_initcall(cio_settle_init);
 
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
+	if (!sch->dev.parent)
+		return 0;
 	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
 }
 
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index aff073a..df888506 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -21,6 +21,11 @@
 
 struct kmem_cache *zfcp_fsf_qtcb_cache;
 
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
+
 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
 {
 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
@@ -230,10 +235,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
 		break;
 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-		dev_warn(&adapter->ccw_device->dev,
-			 "The error threshold for checksum statistics "
-			 "has been exceeded\n");
 		zfcp_dbf_hba_bit_err("fssrh_3", req);
+		if (ber_stop) {
+			dev_warn(&adapter->ccw_device->dev,
+				 "All paths over this FCP device are disused because of excessive bit errors\n");
+			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+		} else {
+			dev_warn(&adapter->ccw_device->dev,
+				 "The error threshold for checksum statistics has been exceeded\n");
+		}
 		break;
 	case FSF_STATUS_READ_LINK_DOWN:
 		zfcp_fsf_status_read_link_down(req);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 1c5051b..9e28792 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -578,7 +578,6 @@ ch_release(struct inode *inode, struct file *file)
 	scsi_changer *ch = file->private_data;
 
 	scsi_device_put(ch->device);
-	ch->device = NULL;
 	file->private_data = NULL;
 	kref_put(&ch->ref, ch_destroy);
 	return 0;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8c71541..a84878f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4189,11 +4189,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		 */
 		if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
 		    pdev->subsystem_device == 0xC000)
-		   	return -ENODEV;
+			goto out_disable_device;
 		/* Now check the magic signature byte */
 		pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
 		if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
-			return -ENODEV;
+			goto out_disable_device;
 		/* Ok it is probably a megaraid */
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 7a1cc0b..d6dc320 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1023,6 +1023,7 @@ void qlt_free_session_done(struct work_struct *work)
 
 	if (logout_started) {
 		bool traced = false;
+		u16 cnt = 0;
 
 		while (!READ_ONCE(sess->logout_completed)) {
 			if (!traced) {
@@ -1032,6 +1033,9 @@ void qlt_free_session_done(struct work_struct *work)
 				traced = true;
 			}
 			msleep(100);
+			cnt++;
+			if (cnt > 200)
+				break;
 		}
 
 		ql_dbg(ql_dbg_disc, vha, 0xf087,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b7a8fdfe..e731af5 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -970,6 +970,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
 	ses->sdb = scmd->sdb;
 	ses->next_rq = scmd->request->next_rq;
 	ses->result = scmd->result;
+	ses->resid_len = scmd->req.resid_len;
 	ses->underflow = scmd->underflow;
 	ses->prot_op = scmd->prot_op;
 	ses->eh_eflags = scmd->eh_eflags;
@@ -981,6 +982,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
 	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
 	scmd->request->next_rq = NULL;
 	scmd->result = 0;
+	scmd->req.resid_len = 0;
 
 	if (sense_bytes) {
 		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1034,6 +1036,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
 	scmd->sdb = ses->sdb;
 	scmd->request->next_rq = ses->next_rq;
 	scmd->result = ses->result;
+	scmd->req.resid_len = ses->resid_len;
 	scmd->underflow = ses->underflow;
 	scmd->prot_op = ses->prot_op;
 	scmd->eh_eflags = ses->eh_eflags;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 45e771d..8856b16 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -723,6 +723,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
 		  const char *buf, size_t count)
 {
 	struct kernfs_node *kn;
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	/*
+	 * Pin the device so the underlying module cannot be removed
+	 * while the delete is in progress.
+	 */
+	if (scsi_device_get(sdev))
+		return -ENODEV;
 
 	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
 	WARN_ON_ONCE(!kn);
@@ -737,9 +745,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
 	 * state into SDEV_DEL.
 	 */
 	device_remove_file(dev, attr);
-	scsi_remove_device(to_scsi_device(dev));
+	scsi_remove_device(sdev);
 	if (kn)
 		sysfs_unbreak_active_protection(kn);
+	scsi_device_put(sdev);
 	return count;
 };
 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
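
The essence of the fix is to hold a reference on the scsi_device (which also pins the host driver's module) across the self-deletion, dropping it once scsi_remove_device() returns; the real handler additionally breaks sysfs active protection around the removal. A hedged sketch of just the pinning shape (names and the simplified signature are illustrative):

#include <scsi/scsi_device.h>

/* Illustrative shape only; not the driver's actual store callback. */
static ssize_t example_delete_store(struct device *dev, const char *buf,
				    size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	/* Fails if the device or its module is already going away. */
	if (scsi_device_get(sdev))
		return -ENODEV;

	scsi_remove_device(sdev);
	scsi_device_put(sdev);		/* drop the reference taken above */
	return count;
}
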
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e102edf..745ebb0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1572,7 +1572,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
 		/* we need to evaluate the error return  */
 		if (scsi_sense_valid(sshdr) &&
 			(sshdr->asc == 0x3a ||	/* medium not present */
-			 sshdr->asc == 0x20))	/* invalid command */
+			 sshdr->asc == 0x20 ||	/* invalid command */
+			 (sshdr->asc == 0x74 && sshdr->ascq == 0x71)))	/* drive is password locked */
 				/* this is no error here */
 				return 0;
 
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 38e2731..6a7141e 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2830,6 +2830,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
 	ufs_qcom_ice_print_regs(host);
 }
 
+static u32 ufs_qcom_get_user_cap_mode(struct ufs_hba *hba)
+{
+	return UFS_WB_BUFF_PRESERVE_USER_SPACE;
+}
+
 /**
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
@@ -2856,6 +2861,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
 #ifdef CONFIG_DEBUG_FS
 	.add_debugfs		= ufs_qcom_dbg_add_debugfs,
 #endif
+	.get_user_cap_mode	= ufs_qcom_get_user_cap_mode,
 };
 
 static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 2da1b15..371f160 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -137,7 +137,7 @@ enum desc_header_offset {
 };
 
 enum ufs_desc_def_size {
-	QUERY_DESC_DEVICE_DEF_SIZE		= 0x40,
+	QUERY_DESC_DEVICE_DEF_SIZE		= 0x59,
 	QUERY_DESC_CONFIGURATION_DEF_SIZE	= 0x90,
 	QUERY_DESC_UNIT_DEF_SIZE		= 0x23,
 	QUERY_DESC_INTERCONNECT_DEF_SIZE	= 0x06,
@@ -204,6 +204,7 @@ enum device_desc_param {
 	DEVICE_DESC_PARAM_PSA_MAX_DATA		= 0x25,
 	DEVICE_DESC_PARAM_PSA_TMT		= 0x29,
 	DEVICE_DESC_PARAM_PRDCT_REV		= 0x2A,
+	DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP	= 0x4F,
 };
 
 /* Interconnect descriptor parameters offsets in bytes*/
@@ -374,6 +375,38 @@ enum ufs_dev_pwr_mode {
 	UFS_POWERDOWN_PWR_MODE	= 3,
 };
 
+enum ufs_dev_wb_buf_avail_size {
+	UFS_WB_0_PERCENT_BUF_REMAIN = 0x0,
+	UFS_WB_10_PERCENT_BUF_REMAIN = 0x1,
+	UFS_WB_20_PERCENT_BUF_REMAIN = 0x2,
+	UFS_WB_30_PERCENT_BUF_REMAIN = 0x3,
+	UFS_WB_40_PERCENT_BUF_REMAIN = 0x4,
+	UFS_WB_50_PERCENT_BUF_REMAIN = 0x5,
+	UFS_WB_60_PERCENT_BUF_REMAIN = 0x6,
+	UFS_WB_70_PERCENT_BUF_REMAIN = 0x7,
+	UFS_WB_80_PERCENT_BUF_REMAIN = 0x8,
+	UFS_WB_90_PERCENT_BUF_REMAIN = 0x9,
+	UFS_WB_100_PERCENT_BUF_REMAIN = 0xA,
+};
+
+enum ufs_dev_wb_buf_life_time_est {
+	UFS_WB_0_10_PERCENT_USED = 0x1,
+	UFS_WB_10_20_PERCENT_USED = 0x2,
+	UFS_WB_20_30_PERCENT_USED = 0x3,
+	UFS_WB_30_40_PERCENT_USED = 0x4,
+	UFS_WB_40_50_PERCENT_USED = 0x5,
+	UFS_WB_50_60_PERCENT_USED = 0x6,
+	UFS_WB_60_70_PERCENT_USED = 0x7,
+	UFS_WB_70_80_PERCENT_USED = 0x8,
+	UFS_WB_80_90_PERCENT_USED = 0x9,
+	UFS_WB_90_100_PERCENT_USED = 0xA,
+	UFS_WB_MAX_USED = 0xB,
+};
+
+enum ufs_dev_wb_buf_user_cap_config {
+	UFS_WB_BUFF_PRESERVE_USER_SPACE = 1,
+	UFS_WB_BUFF_USER_SPACE_RED_EN = 2,
+};
 /**
  * struct utp_upiu_header - UPIU header structure
  * @dword_0: UPIU header DW-0
@@ -555,12 +588,18 @@ enum {
 	UFS_DEV_REMOVABLE_NON_BOOTABLE	= 0x03,
 };
 
+/* Possible values for dExtendedUFSFeaturesSupport */
+enum {
+	UFS_DEV_WRITE_BOOSTER_SUP	= BIT(8),
+};
+
 struct ufs_dev_info {
 	/* device descriptor info */
 	u8	b_device_sub_class;
 	u16	w_manufacturer_id;
 	u8	i_product_name;
 	u16	w_spec_version;
+	u32	d_ext_ufs_feature_sup;
 
 	/* query flags */
 	bool f_power_on_wp_en;
@@ -572,6 +611,8 @@ struct ufs_dev_info {
 
 	/* Device deviations from standard UFS device spec. */
 	unsigned int quirks;
+
+	bool keep_vcc_on;
 };
 
 #define MAX_MODEL_LEN 16
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index b6f1cf5..60169a3 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
 #define UFS_VENDOR_TOSHIBA     0x198
 #define UFS_VENDOR_SAMSUNG     0x1CE
 #define UFS_VENDOR_SKHYNIX     0x1AD
+#define UFS_VENDOR_WDC         0x145
 
 /**
  * ufs_dev_fix - ufs device quirk info
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c2397d9..d858fec 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -42,6 +42,7 @@
 #include <linux/nls.h>
 #include <linux/of.h>
 #include <linux/bitfield.h>
+#include <linux/blkdev.h>
 #include <linux/suspend.h>
 #include "ufshcd.h"
 #include "ufs_quirks.h"
@@ -50,6 +51,13 @@
 #include "ufs-debugfs.h"
 #include "ufs-qcom.h"
 
+static bool ufshcd_wb_sup(struct ufs_hba *hba);
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
+static bool ufshcd_wb_is_buf_flush_needed(struct ufs_hba *hba);
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+
 #ifdef CONFIG_DEBUG_FS
 
 static int ufshcd_tag_req_type(struct request *rq)
@@ -376,6 +384,38 @@ static inline bool ufshcd_is_card_offline(struct ufs_hba *hba)
 	return (atomic_read(&hba->card_state) == UFS_CARD_STATE_OFFLINE);
 }
 
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba)
+{
+	/*
+	 * Query the dAvailableWriteBoosterBufferSize attribute and enable
+	 * the WriteBooster buffer flush when 30% or less of the buffer
+	 * remains available; in user-space reduction mode, flush only
+	 * when 10% or less remains.
+	 */
+	if (ufshcd_wb_is_buf_flush_needed(hba))
+		ufshcd_wb_buf_flush_enable(hba);
+	else
+		ufshcd_wb_buf_flush_disable(hba);
+}
+
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!ufshcd_wb_sup(hba))
+		return;
+
+	ret = ufshcd_wb_ctrl(hba, true);
+	if (ret)
+		dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
+	else
+		dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
+	ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
+	if (ret)
+		dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
+			__func__, ret);
+}
+
 static inline bool ufshcd_is_device_offline(struct ufs_hba *hba)
 {
 	if (hba->extcon && ufshcd_is_card_offline(hba))
@@ -430,6 +470,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
 		UFS_DEVICE_NO_FASTAUTO),
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_WDC, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
@@ -1833,6 +1875,10 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 				hba->clk_gating.delay_ms_pwr_save;
 	}
 
+	/* Enable Write Booster if we have scaled up else disable it */
+	up_write(&hba->lock);
+	ufshcd_wb_ctrl(hba, scale_up);
+	down_write(&hba->lock);
 	goto clk_scaling_unprepare;
 
 scale_up_gear:
@@ -6880,6 +6926,163 @@ static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
 				__func__, err);
 }
 
+static bool ufshcd_wb_sup(struct ufs_hba *hba)
+{
+	return !!(hba->dev_info.d_ext_ufs_feature_sup &
+		  UFS_DEV_WRITE_BOOSTER_SUP);
+}
+
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+{
+	int ret;
+	enum query_opcode opcode;
+
+	if (!ufshcd_wb_sup(hba))
+		return 0;
+
+	if (enable)
+		opcode = UPIU_QUERY_OPCODE_SET_FLAG;
+	else
+		opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+	ret = ufshcd_query_flag_retry(hba, opcode,
+				      QUERY_FLAG_IDN_WB_EN, NULL);
+	if (ret) {
+		dev_err(hba->dev, "%s write booster %s failed %d\n",
+			__func__, enable ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	hba->wb_enabled = enable;
+	dev_dbg(hba->dev, "%s write booster %s %d\n",
+			__func__, enable ? "enable" : "disable", ret);
+
+	return ret;
+}
+
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+{
+	int val;
+
+	if (set)
+		val =  UPIU_QUERY_OPCODE_SET_FLAG;
+	else
+		val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+	return ufshcd_query_flag_retry(hba, val,
+			       QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
+				       NULL);
+}
+
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!ufshcd_wb_sup(hba) || hba->wb_buf_flush_enabled)
+		return 0;
+
+	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, NULL);
+	if (ret)
+		dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
+			__func__, ret);
+	else
+		hba->wb_buf_flush_enabled = true;
+
+	dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
+	return ret;
+}
+
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!ufshcd_wb_sup(hba) || !hba->wb_buf_flush_enabled)
+		return 0;
+
+	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, NULL);
+	if (ret) {
+		dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
+			__func__, ret);
+	} else {
+		hba->wb_buf_flush_enabled = false;
+		dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+	}
+
+	return ret;
+}
+
+static bool ufshcd_wb_is_buf_flush_needed(struct ufs_hba *hba)
+{
+	int ret;
+	u32 cur_buf, status, avail_buf;
+
+	if (!ufshcd_wb_sup(hba))
+		return false;
+
+	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
+				      0, 0, &avail_buf);
+	if (ret) {
+		dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+			 __func__, ret);
+		return false;
+	}
+
+	ret = ufshcd_vops_get_user_cap_mode(hba);
+	if (ret <= 0) {
+		dev_dbg(hba->dev, "Get user-cap reduction mode: failed: %d\n",
+			ret);
+		/* Most commonly used */
+		ret = UFS_WB_BUFF_PRESERVE_USER_SPACE;
+	}
+
+	hba->dev_info.keep_vcc_on = false;
+	if (ret == UFS_WB_BUFF_USER_SPACE_RED_EN) {
+		if (avail_buf <= UFS_WB_10_PERCENT_BUF_REMAIN) {
+			hba->dev_info.keep_vcc_on = true;
+			return true;
+		}
+		return false;
+	} else if (ret == UFS_WB_BUFF_PRESERVE_USER_SPACE) {
+		ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+					      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
+					      0, 0, &cur_buf);
+		if (ret) {
+			dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+				 __func__, ret);
+			return false;
+		}
+
+		if (!cur_buf) {
+			dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
+				 cur_buf);
+			return false;
+		}
+
+		ret = ufshcd_get_ee_status(hba, &status);
+		if (ret) {
+			dev_err(hba->dev, "%s: failed to get exception status %d\n",
+				__func__, ret);
+			if (avail_buf < UFS_WB_40_PERCENT_BUF_REMAIN) {
+				hba->dev_info.keep_vcc_on = true;
+				return true;
+			}
+			return false;
+		}
+
+		status &= hba->ee_ctrl_mask;
+
+		if ((status & MASK_EE_URGENT_BKOPS) ||
+		    (avail_buf < UFS_WB_40_PERCENT_BUF_REMAIN)) {
+			hba->dev_info.keep_vcc_on = true;
+			return true;
+		}
+	}
+	return false;
+}
+
 /**
  * ufshcd_exception_event_handler - handle exceptions raised by device
  * @work: pointer to work data
@@ -8353,6 +8556,21 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
+
+	/* Enable WB for UFS 3.1, or Toshiba UFS 3.0 with desc len >= 0x59 */
+	if ((dev_desc->wspecversion >= 0x310) ||
+	    (dev_desc->wmanufacturerid == UFS_VENDOR_TOSHIBA &&
+	     dev_desc->wspecversion >= 0x300 &&
+	     hba->desc_size.dev_desc >= 0x59))
+		hba->dev_info.d_ext_ufs_feature_sup =
+			desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP]
+								<< 24 |
+			desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 1]
+								<< 16 |
+			desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 2]
+								<< 8 |
+			desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 3];
+
 	/* Zero-pad entire buffer for string termination. */
 	memset(desc_buf, 0, buff_len);
 
@@ -8922,6 +9140,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	/* set the state as operational after switching to desired gear */
 	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 
+	ufshcd_wb_config(hba);
+
 	/*
 	 * If we are in error handling context or in power management callbacks
 	 * context, no need to scan the host
@@ -10223,6 +10443,9 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 	 *
 	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
 	 * in low power state which would save some power.
+	 *
+	 * If Write Booster is enabled and the device needs to flush the WB
+	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
 	 */
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
 	    !hba->dev_info.is_lu_power_on_wp) {
@@ -10241,7 +10464,8 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 		else
 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
-		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+		if (!hba->dev_info.keep_vcc_on)
+			ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
 		if (!ufshcd_is_link_active(hba)) {
 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
@@ -10436,11 +10660,16 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 			/* make sure that auto bkops is disabled */
 			ufshcd_disable_auto_bkops(hba);
 		}
+		ufshcd_wb_toggle_flush(hba);
+	} else if (!ufshcd_is_runtime_pm(pm_op)) {
+		ufshcd_wb_buf_flush_disable(hba);
+		hba->dev_info.keep_vcc_on = false;
 	}
 
 	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
-	     ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
-	       !ufshcd_is_runtime_pm(pm_op))) {
+	    ((ufshcd_is_runtime_pm(pm_op) && (!hba->auto_bkops_enabled)
+	      && !hba->wb_buf_flush_enabled) ||
+	     !ufshcd_is_runtime_pm(pm_op))) {
 		/* ensure that bkops is disabled */
 		ufshcd_disable_auto_bkops(hba);
 		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
@@ -10469,6 +10698,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (ret)
 		goto set_link_active;
 
+	/*
+	 * Disable the host irq as there won't be any host controller
+	 * transaction expected till resume.
+	 */
+	ufshcd_disable_irq(hba);
+
 	if (!ufshcd_is_link_active(hba))
 		ret = ufshcd_disable_clocks(hba, false);
 	else
@@ -10485,16 +10720,13 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
-	/*
-	 * Disable the host irq as host controller as there won't be any
-	 * host controller transaction expected till resume.
-	 */
-	ufshcd_disable_irq(hba);
+
 	/* Put the host controller in low power mode if possible */
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
 
 set_link_active:
+	ufshcd_enable_irq(hba);
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
 	ufshcd_vreg_set_hpm(hba);
@@ -10593,6 +10825,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 			hba->hibern8_on_idle.state = HIBERN8_EXITED;
 	}
 
+	ufshcd_wb_buf_flush_disable(hba);
 	if (!ufshcd_is_ufs_dev_active(hba)) {
 		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
 		if (ret)
@@ -10850,6 +11083,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 {
 	int ret = 0;
 
+	if (!hba->is_powered)
+		goto out;
+
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
 		goto out;
 
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 70ea549..5fe27648 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -363,6 +363,7 @@ struct ufs_hba_variant_ops {
 	u32	(*get_scale_down_gear)(struct ufs_hba *hba);
 	int	(*set_bus_vote)(struct ufs_hba *hba, bool on);
 	int	(*phy_initialization)(struct ufs_hba *);
+	u32	(*get_user_cap_mode)(struct ufs_hba *hba);
 #ifdef CONFIG_DEBUG_FS
 	void	(*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
 	void	(*remove_debugfs)(struct ufs_hba *hba);
@@ -979,6 +980,7 @@ struct ufs_hba {
 	/* Keeps information of the UFS device connected to this host */
 	struct ufs_dev_info dev_info;
 	bool auto_bkops_enabled;
+	bool wb_buf_flush_enabled;
 
 #ifdef CONFIG_DEBUG_FS
 	struct debugfs_files debugfs_files;
@@ -1082,6 +1084,7 @@ struct ufs_hba {
 
 	bool phy_init_g4;
 	bool force_g4;
+	bool wb_enabled;
 };
 
 static inline void ufshcd_set_card_removal_ongoing(struct ufs_hba *hba)
@@ -1644,4 +1647,10 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 		     const char *prefix);
 
+static inline unsigned int ufshcd_vops_get_user_cap_mode(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops->get_user_cap_mode)
+		return hba->var->vops->get_user_cap_mode(hba);
+	return 0;
+}
 #endif /* End of Header */
diff --git a/drivers/soc/qcom/dfc_qmap.c b/drivers/soc/qcom/dfc_qmap.c
index a4b2095..5e26739 100644
--- a/drivers/soc/qcom/dfc_qmap.c
+++ b/drivers/soc/qcom/dfc_qmap.c
@@ -148,6 +148,7 @@ static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
 	skb->protocol = htons(ETH_P_MAP);
 	skb->dev = rmnet_get_real_dev(dfc->rmnet_port);
 
+	rmnet_ctl_log_debug("TXI", skb->data, skb->len);
 	trace_dfc_qmap(skb->data, skb->len, false);
 	dev_queue_xmit(skb);
 }
@@ -433,6 +434,7 @@ static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
 	skb->dev = qos->real_dev;
 
 	/* This cmd needs to be sent in-band */
+	rmnet_ctl_log_info("TXI", skb->data, skb->len);
 	trace_dfc_qmap(skb->data, skb->len, false);
 	rmnet_map_tx_qmap_cmd(skb);
 }
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index f175881..f9c54b6 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -911,74 +911,52 @@ int dfc_bearer_flow_ctl(struct net_device *dev,
 			struct rmnet_bearer_map *bearer,
 			struct qos_info *qos)
 {
-	int rc = 0, qlen;
-	int enable;
-	int i;
+	bool enable;
 
-	enable = bearer->grant_size ? 1 : 0;
+	enable = bearer->grant_size ? true : false;
 
-	for (i = 0; i < MAX_MQ_NUM; i++) {
-		if (qos->mq[i].bearer == bearer) {
-			/* Do not flow disable ancillary q in tcp bidir */
-			if (qos->mq[i].ancillary &&
-			    bearer->tcp_bidir && !enable)
-				continue;
+	qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
+	trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
+			 bearer->grant_size,
+			 0, bearer->mq_idx, enable);
 
-			qlen = qmi_rmnet_flow_control(dev, i, enable);
-			trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
-					 bearer->grant_size,
-					 qlen, i, enable);
-			rc++;
-		}
+	/* Do not flow disable tcp ack q in tcp bidir */
+	if (bearer->ack_mq_idx != INVALID_MQ &&
+	    (enable || !bearer->tcp_bidir)) {
+		qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, enable);
+		trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
+				 bearer->grant_size,
+				 0, bearer->ack_mq_idx, enable);
 	}
 
-	if (enable == 0 && bearer->ack_req)
+	if (!enable && bearer->ack_req)
 		dfc_send_ack(dev, bearer->bearer_id,
 			     bearer->seq, qos->mux_id,
 			     DFC_ACK_TYPE_DISABLE);
 
-	return rc;
+	return 0;
 }
 
 static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 				struct qos_info *qos, u8 ack_req, u32 ancillary,
 				struct dfc_flow_status_info_type_v01 *fc_info)
 {
-	struct rmnet_bearer_map *bearer_itm;
-	int rc = 0, qlen;
-	bool enable;
-	int i;
+	struct rmnet_bearer_map *bearer;
 
-	enable = fc_info->num_bytes > 0 ? 1 : 0;
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		bearer->grant_size = fc_info->num_bytes;
+		bearer->grant_thresh =
+			qmi_rmnet_grant_per(bearer->grant_size);
+		bearer->seq = fc_info->seq_num;
+		bearer->ack_req = ack_req;
+		bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		bearer->last_grant = fc_info->num_bytes;
+		bearer->last_seq = fc_info->seq_num;
 
-	list_for_each_entry(bearer_itm, &qos->bearer_head, list) {
-		bearer_itm->grant_size = fc_info->num_bytes;
-		bearer_itm->grant_thresh =
-			qmi_rmnet_grant_per(bearer_itm->grant_size);
-		bearer_itm->seq = fc_info->seq_num;
-		bearer_itm->ack_req = ack_req;
-		bearer_itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
-		bearer_itm->last_grant = fc_info->num_bytes;
-		bearer_itm->last_seq = fc_info->seq_num;
+		dfc_bearer_flow_ctl(dev, bearer, qos);
 	}
 
-	for (i = 0; i < MAX_MQ_NUM; i++) {
-		bearer_itm = qos->mq[i].bearer;
-		if (!bearer_itm)
-			continue;
-		qlen = qmi_rmnet_flow_control(dev, i, enable);
-		trace_dfc_qmi_tc(dev->name, bearer_itm->bearer_id,
-				 fc_info->num_bytes,
-				 qlen, i, enable);
-		rc++;
-	}
-
-	if (enable == 0 && ack_req)
-		dfc_send_ack(dev, fc_info->bearer_id,
-			     fc_info->seq_num, fc_info->mux_id,
-			     DFC_ACK_TYPE_DISABLE);
-
-	return rc;
+	return 0;
 }
 
 static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
@@ -1023,9 +1001,8 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 
 		if (action)
 			rc = dfc_bearer_flow_ctl(dev, itm, qos);
-	} else {
-		qos->default_grant = fc_info->num_bytes;
 	}
+
 	return rc;
 }
 
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 8c701af..a8e21ab 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -93,65 +93,33 @@ static struct platform_device *eud_private;
 static void enable_eud(struct platform_device *pdev)
 {
 	struct eud_chip *priv = platform_get_drvdata(pdev);
-	struct power_supply *usb_psy = NULL;
-	union power_supply_propval pval = {0};
-	union power_supply_propval tval = {0};
 	int ret;
 
-	usb_psy = power_supply_get_by_name("usb");
-	if (!usb_psy) {
-		dev_warn(&pdev->dev, "%s: Could not get usb power_supply\n",
-					__func__);
-		return;
+	/* write into CSR to enable EUD */
+	writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
+
+	/* Enable vbus, chgr & safe mode warning interrupts */
+	writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
+			priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
+
+	/* Enable secure eud if supported */
+	if (priv->secure_eud_en) {
+		ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
+				   EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
+		if (ret)
+			dev_err(&pdev->dev,
+			"scm_io_write failed with rc:%d\n", ret);
 	}
 
-	ret = power_supply_get_property(usb_psy,
-			POWER_SUPPLY_PROP_PRESENT, &pval);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: Unable to read USB PRESENT: %d\n",
-					__func__, ret);
-		return;
-	}
+	/* Ensure Register Writes Complete */
+	wmb();
 
-	ret = power_supply_get_property(usb_psy,
-			POWER_SUPPLY_PROP_REAL_TYPE, &tval);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: Unable to read USB TYPE: %d\n",
-					__func__, ret);
-		return;
-	}
-
-	if (pval.intval && (tval.intval == POWER_SUPPLY_TYPE_USB ||
-	    tval.intval == POWER_SUPPLY_TYPE_USB_CDP)) {
-		/* write into CSR to enable EUD */
-		writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
-		/* Enable vbus, chgr & safe mode warning interrupts */
-		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
-				priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
-		/* Enable secure eud if supported */
-		if (priv->secure_eud_en) {
-			ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
-					   EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
-			if (ret)
-				dev_err(&pdev->dev,
-				"scm_io_write failed with rc:%d\n", ret);
-		}
-
-		/* Ensure Register Writes Complete */
-		wmb();
-
-		/*
-		 * Set the default cable state to usb connect and charger
-		 * enable
-		 */
-		extcon_set_state_sync(priv->extcon, EXTCON_USB, true);
-		extcon_set_state_sync(priv->extcon, EXTCON_CHG_USB_SDP, true);
-	} else {
-		dev_warn(&pdev->dev,
-			"%s: Connect USB cable before enabling EUD\n",
-			__func__);
-		return;
-	}
+	/*
+	 * Set the default cable state to usb connect and charger
+	 * enable
+	 */
+	extcon_set_state_sync(priv->extcon, EXTCON_USB, true);
+	extcon_set_state_sync(priv->extcon, EXTCON_CHG_USB_SDP, true);
 
 	dev_dbg(&pdev->dev, "%s: EUD is Enabled\n", __func__);
 }
@@ -549,6 +517,7 @@ static int msm_eud_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, chip);
 
+	chip->dev = &pdev->dev;
 	chip->extcon = devm_extcon_dev_allocate(&pdev->dev, eud_extcon_cable);
 	if (IS_ERR(chip->extcon)) {
 		dev_err(chip->dev, "%s: failed to allocate extcon device\n",
diff --git a/drivers/soc/qcom/llcc-lito.c b/drivers/soc/qcom/llcc-lito.c
index 9919159..e03053c 100644
--- a/drivers/soc/qcom/llcc-lito.c
+++ b/drivers/soc/qcom/llcc-lito.c
@@ -51,16 +51,16 @@
 	}
 
 static struct llcc_slice_config lito_data[] =  {
-	SCT_ENTRY(LLCC_CPUSS,    1, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 1),
-	SCT_ENTRY(LLCC_AUDIO,    6, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_MDM,      8, 512, 2, 0, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_GPUHTW,   11, 256, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_GPU,      12, 256, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_DISP,     16, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_MDMPNG,   21, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_AUDHW,    22, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_NPU,      23, 512, 2, 1, 0x0,  0xF00, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_MODEMVPE, 29, 128, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_CPUSS,    1, 1536, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1),
+	SCT_ENTRY(LLCC_AUDIO,    6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_MDM,      8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_GPUHTW,   11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_GPU,      12, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_DISP,     16, 1536, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_MDMPNG,   21, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_AUDHW,    22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_NPU,      23, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_MODEMVPE, 29, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 };
 
 static int lito_qcom_llcc_probe(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 58bb831..ab63799 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -24,6 +24,37 @@
 
 #define SCM_CMD_DEBUG_LAR_UNLOCK	0x4
 
+#define CPUSS_REGDUMP			0xEF
+
+#define INPUT_DATA_BY_HLOS		0x00C0FFEE
+#define FORMAT_VERSION_1		0x1
+#define CORE_REG_NUM_DEFAULT		0x1
+
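+/*
+ * Word indexes into the dump buffer: words 0-3 form the header,
+ * per-core register entries start at PERCORE_INDEX.
+ */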
+#define MAGIC_INDEX			0
+#define FORMAT_VERSION_INDEX		1
+#define SYS_REG_INPUT_INDEX		2
+#define OUTPUT_DUMP_INDEX		3
+#define PERCORE_INDEX			4
+#define SYSTEM_REGS_INPUT_INDEX	5
+
+struct cpuss_dump_data {
+	void *dump_vaddr;
+	u32 size;
+	u32 core_reg_num;
+	u32 core_reg_used_num;
+	u32 core_reg_end_index;
+	u32 sys_reg_size;
+	u32 used_memory;
+	struct mutex mutex;
+};
+
+struct reg_dump_data {
+	uint32_t magic;
+	uint32_t version;
+	uint32_t system_regs_input_index;
+	uint32_t regdump_output_byte_offset;
+};
+
 struct msm_dump_table {
 	uint32_t version;
 	uint32_t num_entries;
@@ -37,6 +68,388 @@ struct msm_memory_dump {
 
 static struct msm_memory_dump memdump;
 
+/**
+ * update_reg_dump_table - update the register dump table
+ * @dev: device whose drvdata holds the cpuss dump data
+ * @core_reg_num: the number of per-core registers
+ *
+ * This function calculates system_regs_input_index and
+ * regdump_output_byte_offset and stores them in the dump memory.
+ * It also updates the members of cpudata from the core_reg_num parameter.
+ *
+ * Returns 0 on success, or -ENOMEM if there is not enough memory.
+ */
+static int update_reg_dump_table(struct device *dev, u32 core_reg_num)
+{
+	int ret = 0;
+	u32 system_regs_input_index = SYSTEM_REGS_INPUT_INDEX +
+			core_reg_num * 2;
+	u32 regdump_output_byte_offset = (system_regs_input_index + 1)
+			* sizeof(uint32_t);
+	struct reg_dump_data *p;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	mutex_lock(&cpudata->mutex);
+
+	if (regdump_output_byte_offset >= cpudata->size ||
+			regdump_output_byte_offset / sizeof(uint32_t)
+			< system_regs_input_index + 1) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	cpudata->core_reg_num = core_reg_num;
+	cpudata->core_reg_used_num = 0;
+	cpudata->core_reg_end_index = PERCORE_INDEX;
+	cpudata->sys_reg_size = 0;
+	cpudata->used_memory = regdump_output_byte_offset;
+
+	memset(cpudata->dump_vaddr, 0xDE, cpudata->size);
+	p = (struct reg_dump_data *)cpudata->dump_vaddr;
+	p->magic = INPUT_DATA_BY_HLOS;
+	p->version = FORMAT_VERSION_1;
+	p->system_regs_input_index = system_regs_input_index;
+	p->regdump_output_byte_offset = regdump_output_byte_offset;
+	memset((uint32_t *)cpudata->dump_vaddr + PERCORE_INDEX, 0x0,
+			(system_regs_input_index - PERCORE_INDEX + 1)
+			* sizeof(uint32_t));
+
+err:
+	mutex_unlock(&cpudata->mutex);
+	return ret;
+}
+
+static ssize_t core_reg_num_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	if (!cpudata)
+		return -EFAULT;
+
+	mutex_lock(&cpudata->mutex);
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n", cpudata->core_reg_num);
+
+	mutex_unlock(&cpudata->mutex);
+	return ret;
+}
+
+static ssize_t core_reg_num_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret;
+	unsigned int val;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	if (kstrtouint(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&cpudata->mutex);
+
+	if (cpudata->core_reg_used_num || cpudata->sys_reg_size) {
+		dev_err(dev, "Couldn't set core_reg_num, register available in list\n");
+		ret = -EPERM;
+		goto err;
+	}
+	if (val == cpudata->core_reg_num) {
+		ret = 0;
+		goto err;
+	}
+
+	mutex_unlock(&cpudata->mutex);
+
+	ret = update_reg_dump_table(dev, val);
+	if (ret) {
+		dev_err(dev, "Couldn't set core_reg_num, no enough memory\n");
+		return ret;
+	}
+
+	return size;
+
+err:
+	mutex_unlock(&cpudata->mutex);
+	return ret;
+}
+static DEVICE_ATTR_RW(core_reg_num);
+
+/**
+ * register_config_show - show the configured per-core and system registers
+ */
+static ssize_t register_config_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	char local_buf[64];
+	int len = 0, count = 0;
+	int index, system_index_start, index_end;
+	uint32_t register_offset, length_in_bytes;
+	uint32_t length_in_words;
+	uint32_t *p;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	buf[0] = '\0';
+
+	if (!cpudata)
+		return -EFAULT;
+
+	mutex_lock(&cpudata->mutex);
+
+	p = (uint32_t *)cpudata->dump_vaddr;
+
+	/* print per-core & system registers */
+	len = snprintf(local_buf, 64, "per-core registers:\n");
+	strlcat(buf, local_buf, PAGE_SIZE);
+	count += len;
+
+	system_index_start = *(p + SYS_REG_INPUT_INDEX);
+	index_end = system_index_start +
+			cpudata->sys_reg_size / sizeof(uint32_t) + 1;
+	for (index = PERCORE_INDEX; index < index_end;) {
+		if (index == system_index_start) {
+			len = snprintf(local_buf, 64, "system registers:\n");
+			if ((count + len) > PAGE_SIZE) {
+				dev_err(dev, "Couldn't write complete config\n");
+				break;
+			}
+
+			strlcat(buf, local_buf, PAGE_SIZE);
+			count += len;
+		}
+
+		register_offset = *(p + index);
+		if (register_offset == 0) {
+			index++;
+			continue;
+		}
+
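+		/* Low 2 bits of the address word, if set, encode a 1-3 word length */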
+		if (register_offset & 0x3) {
+			length_in_words = register_offset & 0x3;
+			length_in_bytes = length_in_words << 2;
+			len = snprintf(local_buf, 64,
+				"Index: 0x%x, addr: 0x%x\n",
+				index, register_offset);
+			index++;
+		} else {
+			length_in_bytes = *(p + index + 1);
+			len = snprintf(local_buf, 64,
+				"Index: 0x%x, addr: 0x%x, length: 0x%x\n",
+				index, register_offset, length_in_bytes);
+			index += 2;
+		}
+
+		if ((count + len) > PAGE_SIZE) {
+			dev_err(dev, "Couldn't write complete config\n");
+			break;
+		}
+
+		strlcat(buf, local_buf, PAGE_SIZE);
+		count += len;
+	}
+
+	mutex_unlock(&cpudata->mutex);
+	return count;
+}
+
+/**
+ * register_config_store - add a per-core or system register config entry
+ */
+static ssize_t register_config_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret;
+	uint32_t register_offset, length_in_bytes, per_core = 0;
+	uint32_t length_in_words;
+	int nval;
+	uint32_t num_cores;
+	u32 extra_memory;
+	u32 used_memory;
+	u32 system_reg_end_index;
+	uint32_t *p;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	nval = sscanf(buf, "%x %x %u", &register_offset,
+				&length_in_bytes, &per_core);
+	if (nval != 2 && nval != 3)
+		return -EINVAL;
+	if (per_core > 1)
+		return -EINVAL;
+	if (register_offset & 0x3) {
+		dev_err(dev, "Invalid address, must be 4 byte aligned\n");
+		return -EINVAL;
+	}
+	if (length_in_bytes & 0x3) {
+		dev_err(dev, "Invalid length, must be 4 byte aligned\n");
+		return -EINVAL;
+	}
+	if (length_in_bytes == 0) {
+		dev_err(dev, "Invalid length of 0\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&cpudata->mutex);
+
+	p = (uint32_t *)cpudata->dump_vaddr;
+	length_in_words = length_in_bytes >> 2;
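+	/*
+	 * Entries of up to 3 words pack the word count into the low 2 bits
+	 * of the (4-byte aligned) address; longer entries use an address
+	 * word followed by a byte-length word.
+	 */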
+	if (per_core) { /* per-core register */
+		if (cpudata->core_reg_used_num == cpudata->core_reg_num) {
+			dev_err(dev, "Couldn't add per-core config, out of range\n");
+			ret = -EINVAL;
+			goto err;
+		}
+
+		num_cores = num_possible_cpus();
+		extra_memory = length_in_bytes * num_cores;
+		used_memory = cpudata->used_memory + extra_memory;
+		if (extra_memory / num_cores < length_in_bytes ||
+				used_memory > cpudata->size ||
+				used_memory < cpudata->used_memory) {
+			dev_err(dev, "Couldn't add per-core reg config, no enough memory\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		if (length_in_words > 3) {
+			*(p + cpudata->core_reg_end_index) = register_offset;
+			*(p + cpudata->core_reg_end_index + 1) =
+					length_in_bytes;
+			cpudata->core_reg_end_index += 2;
+		} else {
+			*(p + cpudata->core_reg_end_index) = register_offset |
+					length_in_words;
+			cpudata->core_reg_end_index++;
+		}
+
+		cpudata->core_reg_used_num++;
+		cpudata->used_memory = used_memory;
+	} else { /* system register */
+		system_reg_end_index = *(p + SYS_REG_INPUT_INDEX) +
+				cpudata->sys_reg_size / sizeof(uint32_t);
+
+		if (length_in_words > 3) {
+			extra_memory = sizeof(uint32_t) * 2 + length_in_bytes;
+			used_memory = cpudata->used_memory + extra_memory;
+			if (extra_memory < length_in_bytes ||
+					used_memory > cpudata->size ||
+					used_memory < cpudata->used_memory) {
+				dev_err(dev, "Couldn't add system reg config, no enough memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			*(p + system_reg_end_index) = register_offset;
+			*(p + system_reg_end_index + 1) = length_in_bytes;
+			system_reg_end_index += 2;
+			cpudata->sys_reg_size += sizeof(uint32_t) * 2;
+		} else {
+			extra_memory = sizeof(uint32_t) + length_in_bytes;
+			used_memory = cpudata->used_memory + extra_memory;
+			if (extra_memory < length_in_bytes ||
+					used_memory > cpudata->size ||
+					used_memory < cpudata->used_memory) {
+				dev_err(dev, "Couldn't add system reg config, no enough memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			*(p + system_reg_end_index) = register_offset |
+					length_in_words;
+			system_reg_end_index++;
+			cpudata->sys_reg_size += sizeof(uint32_t);
+		}
+
+		cpudata->used_memory = used_memory;
+
+		*(p + system_reg_end_index) = 0x0;
+		*(p + OUTPUT_DUMP_INDEX) = (system_reg_end_index + 1)
+				* sizeof(uint32_t);
+	}
+
+	ret = size;
+
+err:
+	mutex_unlock(&cpudata->mutex);
+	return ret;
+}
+static DEVICE_ATTR_RW(register_config);
+
+/**
+ * register_reset_store - reset the register dump table
+ */
+static ssize_t register_reset_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	unsigned int val;
+
+	if (kstrtouint(buf, 16, &val))
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	update_reg_dump_table(dev, CORE_REG_NUM_DEFAULT);
+
+	return size;
+}
+static DEVICE_ATTR_WO(register_reset);
+
+static const struct device_attribute *register_dump_attrs[] = {
+	&dev_attr_core_reg_num,
+	&dev_attr_register_config,
+	&dev_attr_register_reset,
+	NULL,
+};
+
+static int register_dump_create_files(struct device *dev,
+			const struct device_attribute **attrs)
+{
+	int ret = 0;
+	int i, j;
+
+	for (i = 0; attrs[i] != NULL; i++) {
+		ret = device_create_file(dev, attrs[i]);
+		if (ret) {
+			dev_err(dev, "Couldn't create sysfs attribute: %s\n",
+				attrs[i]->attr.name);
+			for (j = 0; j < i; j++)
+				device_remove_file(dev, attrs[j]);
+			break;
+		}
+	}
+	return ret;
+}
+
+static void cpuss_regdump_init(struct platform_device *pdev,
+		void *dump_vaddr, u32 size)
+{
+	struct cpuss_dump_data *cpudata = NULL;
+	int ret;
+
+	cpudata = devm_kzalloc(&pdev->dev,
+			sizeof(struct cpuss_dump_data), GFP_KERNEL);
+	if (!cpudata)
+		goto fail;
+
+	cpudata->dump_vaddr = dump_vaddr;
+	cpudata->size = size;
+
+	mutex_init(&cpudata->mutex);
+	ret = register_dump_create_files(&pdev->dev,
+			register_dump_attrs);
+	if (ret) {
+		devm_kfree(&pdev->dev, cpudata);
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, cpudata);
+
+	return;
+
+fail:
+	pr_err("Failed to initialize CPUSS regdump region\n");
+}
+
 uint32_t msm_dump_table_version(void)
 {
 	return MSM_DUMP_TABLE_VERSION;
@@ -350,6 +763,10 @@ static int mem_dump_alloc(struct platform_device *pdev)
 			dev_err(&pdev->dev, "Mini dump entry failed id = %d\n",
 				id);
 
+		if (id == CPUSS_REGDUMP)
+			cpuss_regdump_init(pdev,
+				(dump_vaddr + MSM_DUMP_DATA_SIZE), size);
+
 		dump_vaddr += (size + MSM_DUMP_DATA_SIZE);
 		phys_addr += (size  + MSM_DUMP_DATA_SIZE);
 	}
diff --git a/drivers/soc/qcom/qbt_handler.c b/drivers/soc/qcom/qbt_handler.c
index dffe543..9550acb 100644
--- a/drivers/soc/qcom/qbt_handler.c
+++ b/drivers/soc/qcom/qbt_handler.c
@@ -966,7 +966,7 @@ static int setup_ipc_irq(struct platform_device *pdev,
 		drvdata->fw_ipc.irq,
 		NULL,
 		qbt_ipc_irq_handler,
-		IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+		IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
 		desc,
 		drvdata);
 
diff --git a/drivers/soc/qcom/qcom_ipcc.c b/drivers/soc/qcom/qcom_ipcc.c
index bcc2a5e..6633756 100644
--- a/drivers/soc/qcom/qcom_ipcc.c
+++ b/drivers/soc/qcom/qcom_ipcc.c
@@ -7,6 +7,7 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
+#include <linux/syscore_ops.h>
 #include <linux/platform_device.h>
 #include <linux/mailbox_controller.h>
 #include <dt-bindings/soc/qcom,ipcc.h>
@@ -59,6 +60,8 @@ struct ipcc_mbox_chan {
 	struct ipcc_protocol_data *proto_data;
 };
 
+static struct ipcc_protocol_data *ipcc_proto_data;
+
 static inline u32 qcom_ipcc_get_packed_id(u16 client_id, u16 signal_id)
 {
 	return (client_id << IPCC_CLIENT_ID_SHIFT) | signal_id;
@@ -305,6 +308,43 @@ static int qcom_ipcc_setup_mbox(struct ipcc_protocol_data *proto_data,
 	return mbox_controller_register(mbox);
 }
 
+#ifdef CONFIG_PM
+static int msm_ipcc_suspend(void)
+{
+	return 0;
+}
+
+static void msm_ipcc_resume(void)
+{
+	int virq;
+	struct irq_desc *desc;
+	const char *name = "null";
+	u32 packed_id;
+	struct ipcc_protocol_data *proto_data = ipcc_proto_data;
+
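+	/* Report which IPCC signal, if any, was pending at resume */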
+	packed_id = readl_no_log(proto_data->base + IPCC_REG_RECV_ID);
+	if (packed_id == IPCC_NO_PENDING_IRQ)
+		return;
+
+	virq = irq_find_mapping(proto_data->irq_domain, packed_id);
+	desc = irq_to_desc(virq);
+	if (desc == NULL)
+		name = "stray irq";
+	else if (desc->action && desc->action->name)
+		name = desc->action->name;
+
+	pr_warn("%s: %d triggered %s\n", __func__, virq, name);
+}
+#else
+#define msm_ipcc_suspend NULL
+#define msm_ipcc_resume NULL
+#endif
+
+static struct syscore_ops msm_ipcc_pm_ops = {
+	.suspend = msm_ipcc_suspend,
+	.resume = msm_ipcc_resume,
+};
+
 static int qcom_ipcc_probe(struct platform_device *pdev)
 {
 	char *name;
@@ -317,6 +357,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
 	if (!proto_data)
 		return -ENOMEM;
 
+	ipcc_proto_data = proto_data;
 	proto_data->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -367,6 +408,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
 
 	enable_irq_wake(proto_data->irq);
 	platform_set_drvdata(pdev, proto_data);
+	register_syscore_ops(&msm_ipcc_pm_ops);
 
 	return 0;
 
@@ -381,6 +423,7 @@ static int qcom_ipcc_remove(struct platform_device *pdev)
 {
 	struct ipcc_protocol_data *proto_data = platform_get_drvdata(pdev);
 
+	unregister_syscore_ops(&msm_ipcc_pm_ops);
 	disable_irq_wake(proto_data->irq);
 	mbox_controller_unregister(&proto_data->mbox);
 	irq_domain_remove(proto_data->irq_domain);
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index 65089ad..5218136 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -436,11 +436,9 @@ static void mhi_read_done_work_fn(struct work_struct *work)
 static void usb_write_done(struct qdss_bridge_drvdata *drvdata,
 				   struct qdss_request *d_req)
 {
-	if (d_req->status) {
+	if (d_req->status)
 		pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
-		mhi_queue_read(drvdata);
-		return;
-	}
+
 	qdss_buf_tbl_remove(drvdata, d_req->buf);
 	mhi_queue_read(drvdata);
 }
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 5cc51bf..3366add 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -26,8 +26,11 @@
 #define FLAG_QMAP_MASK 0x0020
 
 #define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
+
 #define DFC_SUPPORTED_MODE(m) \
-	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)
+	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM || \
+	 (m) == DFC_MODE_SA)
+
 #define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
 
 int dfc_mode;
@@ -218,6 +221,131 @@ static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
 	}
 }
 
+static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
+				struct qos_info *qos_info, u8 bearer_id)
+{
+	struct rmnet_bearer_map *bearer;
+
+	bearer = qmi_rmnet_get_bearer_map(qos_info, bearer_id);
+	if (bearer) {
+		bearer->flow_ref++;
+	} else {
+		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
+		if (!bearer)
+			return NULL;
+
+		bearer->bearer_id = bearer_id;
+		bearer->flow_ref = 1;
+		bearer->grant_size = DEFAULT_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
+		bearer->mq_idx = INVALID_MQ;
+		bearer->ack_mq_idx = INVALID_MQ;
+		list_add(&bearer->list, &qos_info->bearer_head);
+	}
+
+	return bearer;
+}
+
+static void __qmi_rmnet_bearer_put(struct net_device *dev,
+				   struct qos_info *qos_info,
+				   struct rmnet_bearer_map *bearer,
+				   bool reset)
+{
+	struct mq_map *mq;
+	int i, j;
+
+	if (bearer && --bearer->flow_ref == 0) {
+		for (i = 0; i < MAX_MQ_NUM; i++) {
+			mq = &qos_info->mq[i];
+			if (mq->bearer != bearer)
+				continue;
+
+			mq->bearer = NULL;
+			if (reset) {
+				qmi_rmnet_reset_txq(dev, i);
+				qmi_rmnet_flow_control(dev, i, 1);
+				trace_dfc_qmi_tc(dev->name,
+					bearer->bearer_id, 0, 0, i, 1);
+
+				if (dfc_mode == DFC_MODE_SA) {
+					j = i + ACK_MQ_OFFSET;
+					qmi_rmnet_reset_txq(dev, j);
+					qmi_rmnet_flow_control(dev, j, 1);
+					trace_dfc_qmi_tc(dev->name,
+						bearer->bearer_id, 0, 0, j, 1);
+				}
+			}
+		}
+
+		/* Remove from bearer map */
+		list_del(&bearer->list);
+		kfree(bearer);
+	}
+}
+
+static void __qmi_rmnet_update_mq(struct net_device *dev,
+				  struct qos_info *qos_info,
+				  struct rmnet_bearer_map *bearer,
+				  struct rmnet_flow_map *itm)
+{
+	struct mq_map *mq;
+
+	/* In SA mode default mq is not associated with any bearer */
+	if (dfc_mode == DFC_MODE_SA && itm->mq_idx == DEFAULT_MQ_NUM)
+		return;
+
+	mq = &qos_info->mq[itm->mq_idx];
+	if (!mq->bearer) {
+		mq->bearer = bearer;
+
+		if (dfc_mode == DFC_MODE_SA) {
+			bearer->mq_idx = itm->mq_idx;
+			bearer->ack_mq_idx = itm->mq_idx + ACK_MQ_OFFSET;
+		} else {
+			if (IS_ANCILLARY(itm->ip_type))
+				bearer->ack_mq_idx = itm->mq_idx;
+			else
+				bearer->mq_idx = itm->mq_idx;
+		}
+
+		qmi_rmnet_flow_control(dev, itm->mq_idx,
+				       bearer->grant_size > 0 ? 1 : 0);
+		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
+				 bearer->grant_size, 0, itm->mq_idx,
+				 bearer->grant_size > 0 ? 1 : 0);
+
+		if (dfc_mode == DFC_MODE_SA) {
+			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
+					bearer->grant_size > 0 ? 1 : 0);
+			trace_dfc_qmi_tc(dev->name, itm->bearer_id,
+					bearer->grant_size, 0,
+					bearer->ack_mq_idx,
+					bearer->grant_size > 0 ? 1 : 0);
+		}
+	}
+}
+
+static int __qmi_rmnet_rebind_flow(struct net_device *dev,
+				   struct qos_info *qos_info,
+				   struct rmnet_flow_map *itm,
+				   struct rmnet_flow_map *new_map)
+{
+	struct rmnet_bearer_map *bearer;
+
+	__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, false);
+
+	bearer = __qmi_rmnet_bearer_get(qos_info, new_map->bearer_id);
+	if (!bearer)
+		return -ENOMEM;
+
+	qmi_rmnet_update_flow_map(itm, new_map);
+	itm->bearer = bearer;
+
+	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
+
+	return 0;
+}
+
 static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 			      struct qmi_info *qmi)
 {
@@ -225,8 +353,7 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	struct rmnet_flow_map new_map, *itm;
 	struct rmnet_bearer_map *bearer;
 	struct tcmsg tmp_tcm;
-	struct mq_map *mq;
-	u32 mq_idx;
+	int rc = 0;
 
 	if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
 		return -EINVAL;
@@ -251,14 +378,21 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
 				     new_map.ip_type);
 	if (itm) {
-		pr_debug("%s: stale flow found\n", __func__);
-		tmp_tcm.tcm__pad1 = itm->bearer_id;
-		tmp_tcm.tcm_parent = itm->flow_id;
-		tmp_tcm.tcm_ifindex = itm->ip_type;
-		tmp_tcm.tcm_handle = itm->mq_idx;
-		spin_unlock_bh(&qos_info->qos_lock);
-		qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
-		goto again;
+		if (itm->bearer_id != new_map.bearer_id) {
+			rc = __qmi_rmnet_rebind_flow(
+				dev, qos_info, itm, &new_map);
+			goto done;
+		} else if (itm->mq_idx != new_map.mq_idx) {
+			tmp_tcm.tcm__pad1 = itm->bearer_id;
+			tmp_tcm.tcm_parent = itm->flow_id;
+			tmp_tcm.tcm_ifindex = itm->ip_type;
+			tmp_tcm.tcm_handle = itm->mq_idx;
+			spin_unlock_bh(&qos_info->qos_lock);
+			qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
+			goto again;
+		} else {
+			goto done;
+		}
 	}
 
 	/* Create flow map */
@@ -272,45 +406,19 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	list_add(&itm->list, &qos_info->flow_head);
 
 	/* Create or update bearer map */
-	bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
-	if (bearer) {
-		bearer->flow_ref++;
-	} else {
-		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
-		if (!bearer) {
-			spin_unlock_bh(&qos_info->qos_lock);
-			return -ENOMEM;
-		}
-
-		bearer->bearer_id = new_map.bearer_id;
-		bearer->flow_ref = 1;
-		bearer->grant_size = qos_info->default_grant;
-		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
-		qos_info->default_grant = DEFAULT_GRANT;
-		list_add(&bearer->list, &qos_info->bearer_head);
+	bearer = __qmi_rmnet_bearer_get(qos_info, new_map.bearer_id);
+	if (!bearer) {
+		rc = -ENOMEM;
+		goto done;
 	}
+
 	itm->bearer = bearer;
 
-	/* Update mq map */
-	mq_idx = tcm->tcm_handle;
-	mq = &qos_info->mq[mq_idx];
-	if (!mq->bearer) {
-		mq->bearer = bearer;
-		mq->ancillary = IS_ANCILLARY(new_map.ip_type);
+	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
 
-		qmi_rmnet_flow_control(dev, mq_idx,
-				       bearer->grant_size > 0 ? 1 : 0);
-		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
-				 bearer->grant_size, 0, mq_idx,
-				 bearer->grant_size > 0 ? 1 : 0);
-
-	} else if (mq->bearer->bearer_id != new_map.bearer_id) {
-		pr_debug("%s: un-managered bearer %u\n",
-				__func__, new_map.bearer_id);
-	}
-
+done:
 	spin_unlock_bh(&qos_info->qos_lock);
-	return 0;
+	return rc;
 }
 
 static int
@@ -319,9 +427,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 {
 	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
 	struct rmnet_flow_map new_map, *itm;
-	struct rmnet_bearer_map *bearer;
-	struct mq_map *mq;
-	u32 mq_idx;
 
 	if (!qos_info)
 		return -EINVAL;
@@ -345,26 +450,7 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 				    new_map.flow_id, new_map.ip_type,
 				    itm->mq_idx, 0);
 
-		bearer = itm->bearer;
-		if (bearer && --bearer->flow_ref == 0) {
-			/* Remove the bearer from mq map */
-			for (mq_idx = 0; mq_idx < MAX_MQ_NUM; mq_idx++) {
-				mq = &qos_info->mq[mq_idx];
-				if (mq->bearer != bearer)
-					continue;
-
-				mq->bearer = NULL;
-				mq->ancillary = false;
-				qmi_rmnet_reset_txq(dev, mq_idx);
-				qmi_rmnet_flow_control(dev, mq_idx, 1);
-				trace_dfc_qmi_tc(dev->name,
-					new_map.bearer_id, 0, 0, mq_idx, 1);
-			}
-
-			/* Remove from bearer map */
-			list_del(&bearer->list);
-			kfree(bearer);
-		}
+		__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, true);
 
 		/* Remove from flow map */
 		list_del(&itm->list);
@@ -682,11 +768,69 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev,
 }
 EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
 
+static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
+{
+	unsigned int len = skb->len;
+
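+	/*
+	 * A pure ACK is the IP header plus a 20-byte TCP header, or 32
+	 * bytes with timestamp options: 40/52 bytes on IPv4, 60/72 on IPv6.
+	 */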
+	switch (skb->protocol) {
+	/* TCPv4 ACKs */
+	case htons(ETH_P_IP):
+		if ((ip_hdr(skb)->protocol == IPPROTO_TCP) &&
+		    (ip_hdr(skb)->ihl == 5) &&
+		    (len == 40 || len == 52) &&
+		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
+			return true;
+		break;
+
+	/* TCPv6 ACKs */
+	case htons(ETH_P_IPV6):
+		if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
+		    (len == 60 || len == 72) &&
+		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
+			return true;
+		break;
+	}
+
+	return false;
+}
+
+static int qmi_rmnet_get_queue_sa(struct qos_info *qos, struct sk_buff *skb)
+{
+	struct rmnet_flow_map *itm;
+	int ip_type;
+	int txq = DEFAULT_MQ_NUM;
+
+	/* Put router/neighbour solicitation (ICMPv6 133/135) in the default mq */
+	if (skb->protocol == htons(ETH_P_IPV6) &&
+	    ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6 &&
+	    (icmp6_hdr(skb)->icmp6_type == 133 ||
+	     icmp6_hdr(skb)->icmp6_type == 135)) {
+		return DEFAULT_MQ_NUM;
+	}
+
+	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
+	if (unlikely(!itm))
+		goto done;
+
+	/* TCP ACKs go to the bearer's ack mq; everything else uses the flow's assigned mq */
+	if (likely(itm->bearer) && qmi_rmnet_is_tcp_ack(skb))
+		txq = itm->bearer->ack_mq_idx;
+	else
+		txq = itm->mq_idx;
+
+done:
+	spin_unlock_bh(&qos->qos_lock);
+	return txq;
+}
+
 int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct qos_info *qos = rmnet_get_qos_pt(dev);
 	int txq = 0, ip_type = AF_INET;
-	unsigned int len = skb->len;
 	struct rmnet_flow_map *itm;
 	u32 mark = skb->mark;
 
@@ -697,32 +841,18 @@ int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
 	if (dfc_mode == DFC_MODE_MQ_NUM)
 		return mark;
 
-	switch (skb->protocol) {
-	/* TCPv4 ACKs */
-	case htons(ETH_P_IP):
-		ip_type = AF_INET;
-		if ((!mark) &&
-		    (ip_hdr(skb)->protocol == IPPROTO_TCP) &&
-		    (len == 40 || len == 52) &&
-		    (ip_hdr(skb)->ihl == 5) &&
-		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
-			return 1;
-		break;
-
-	/* TCPv6 ACKs */
-	case htons(ETH_P_IPV6):
-		ip_type = AF_INET6;
-		if ((!mark) &&
-		    (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
-		    (len == 60 || len == 72) &&
-		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
-			return 1;
-		/* Fall through */
-	}
+	if (dfc_mode == DFC_MODE_SA)
+		return qmi_rmnet_get_queue_sa(qos, skb);
 
 	/* Default flows */
-	if (!mark)
-		return 0;
+	if (!mark) {
+		if (qmi_rmnet_is_tcp_ack(skb))
+			return 1;
+		else
+			return 0;
+	}
+
+	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
 
 	/* Dedicated flows */
 	spin_lock_bh(&qos->qos_lock);
@@ -755,7 +885,6 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
 
 	qos->mux_id = mux_id;
 	qos->real_dev = real_dev;
-	qos->default_grant = DEFAULT_GRANT;
 	qos->tran_num = 0;
 	INIT_LIST_HEAD(&qos->flow_head);
 	INIT_LIST_HEAD(&qos->bearer_head);
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 15dee7c..d94b5ea 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -9,14 +9,19 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 
-#define MAX_MQ_NUM 10
+#define MAX_MQ_NUM 16
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
 #define DEFAULT_GRANT 1
 #define DFC_MAX_BEARERS_V01 16
+#define DEFAULT_MQ_NUM 0
+#define ACK_MQ_OFFSET (MAX_MQ_NUM - 1)
+#define INVALID_MQ 0xFF
 
 #define DFC_MODE_FLOW_ID 2
 #define DFC_MODE_MQ_NUM 3
+#define DFC_MODE_SA 4
+
 extern int dfc_mode;
 extern int dfc_qmap;
 
@@ -34,6 +39,8 @@ struct rmnet_bearer_map {
 	bool rat_switch;
 	bool tx_off;
 	u32 ack_txid;
+	u32 mq_idx;
+	u32 ack_mq_idx;
 };
 
 struct rmnet_flow_map {
@@ -53,7 +60,6 @@ struct svc_info {
 
 struct mq_map {
 	struct rmnet_bearer_map *bearer;
-	bool ancillary;
 };
 
 struct qos_info {
@@ -62,7 +68,6 @@ struct qos_info {
 	struct list_head flow_head;
 	struct list_head bearer_head;
 	struct mq_map mq[MAX_MQ_NUM];
-	u32 default_grant;
 	u32 tran_num;
 	spinlock_t qos_lock;
 };
diff --git a/drivers/soc/qcom/rmnet_ctl/Kconfig b/drivers/soc/qcom/rmnet_ctl/Kconfig
index bfb91fbd..0085cd2 100644
--- a/drivers/soc/qcom/rmnet_ctl/Kconfig
+++ b/drivers/soc/qcom/rmnet_ctl/Kconfig
@@ -10,3 +10,9 @@
 	  Enable the RMNET CTL module which is used for communicating with
 	  device via map command protocol. This module will receive QMAP
 	  control commands via MHI.
+
+menuconfig RMNET_CTL_DEBUG
+	bool "RmNet Control debug"
+	depends on RMNET_CTL
+	help
+	  Enable RMNET CTL IPC debug logging.
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
index 299b301..17a53ec 100644
--- a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
@@ -6,8 +6,14 @@
  */
 
 #include <soc/qcom/rmnet_ctl.h>
+#include <linux/debugfs.h>
+#include <linux/ipc_logging.h>
 #include "rmnet_ctl_client.h"
 
+#define RMNET_CTL_LOG_PAGE 10
+#define RMNET_CTL_LOG_NAME "rmnet_ctl"
+#define RMNET_CTL_LOG_LVL  "ipc_log_lvl"
+
 struct rmnet_ctl_client {
 	struct rmnet_ctl_client_hooks hooks;
 };
@@ -15,14 +21,38 @@ struct rmnet_ctl_client {
 struct rmnet_ctl_endpoint {
 	struct rmnet_ctl_dev __rcu *dev;
 	struct rmnet_ctl_client __rcu *client;
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_loglvl;
+	void *ipc_log;
 };
 
+#ifdef CONFIG_RMNET_CTL_DEBUG
+static u8 ipc_log_lvl = RMNET_CTL_LOG_DEBUG;
+#else
+static u8 ipc_log_lvl = RMNET_CTL_LOG_ERR;
+#endif
+
 static DEFINE_SPINLOCK(client_lock);
 static struct rmnet_ctl_endpoint ctl_ep;
 
 void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev)
 {
 	rcu_assign_pointer(ctl_ep.dev, dev);
+
+	if (dev) {
+		ctl_ep.dbgfs_dir = debugfs_create_dir(
+					RMNET_CTL_LOG_NAME, NULL);
+		if (!IS_ERR_OR_NULL(ctl_ep.dbgfs_dir))
+			ctl_ep.dbgfs_loglvl = debugfs_create_u8(
+				RMNET_CTL_LOG_LVL, 0644, ctl_ep.dbgfs_dir,
+				&ipc_log_lvl);
+
+		if (!ctl_ep.ipc_log)
+			ctl_ep.ipc_log = ipc_log_context_create(
+				RMNET_CTL_LOG_PAGE, RMNET_CTL_LOG_NAME, 0);
+	} else {
+		debugfs_remove_recursive(ctl_ep.dbgfs_dir);
+	}
 }
 
 void rmnet_ctl_endpoint_post(const void *data, size_t len)
@@ -33,6 +63,8 @@ void rmnet_ctl_endpoint_post(const void *data, size_t len)
 	if (unlikely(!data || !len))
 		return;
 
+	rmnet_ctl_log_info("RX", data, len);
+
 	rcu_read_lock();
 
 	client = rcu_dereference(ctl_ep.client);
@@ -109,6 +141,8 @@ int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
 	if (client != rcu_dereference(ctl_ep.client))
 		return rc;
 
+	rmnet_ctl_log_info("TX", skb->data, skb->len);
+
 	rcu_read_lock();
 
 	dev = rcu_dereference(ctl_ep.dev);
@@ -117,6 +151,23 @@ int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
 
 	rcu_read_unlock();
 
+	if (rc)
+		rmnet_ctl_log_err("TXE", rc, skb->data, skb->len);
+
 	return rc;
 }
 EXPORT_SYMBOL(rmnet_ctl_send_client);
+
+void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
+		   int rc, const void *data, unsigned int len)
+{
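+	/* Log at most the first 32 bytes of the payload as hex */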
+	if (lvl <= ipc_log_lvl && ctl_ep.ipc_log) {
+		if (data == NULL || len == 0)
+			ipc_log_string(ctl_ep.ipc_log, "%3s(%d): (null)\n",
+				       msg, rc);
+		else
+			ipc_log_string(ctl_ep.ipc_log, "%3s(%d): %*ph\n",
+				       msg, rc, len > 32 ? 32 : len, data);
+	}
+}
+EXPORT_SYMBOL(rmnet_ctl_log);
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
index af84e13..17f2528 100644
--- a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
@@ -10,9 +10,10 @@
 #include <linux/of.h>
 #include <linux/skbuff.h>
 #include <linux/mhi.h>
+#include <soc/qcom/rmnet_ctl.h>
 #include "rmnet_ctl_client.h"
 
-#define RMNET_CTL_DEFAULT_MRU 1024
+#define RMNET_CTL_DEFAULT_MRU 256
 
 struct rmnet_ctl_mhi_dev {
 	struct mhi_device *mhi_dev;
@@ -51,6 +52,12 @@ static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
 	int no_tre, i, rc;
 
 	no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+
+	if (!no_tre && free_buf) {
+		kfree(free_buf);
+		return;
+	}
+
 	for (i = 0; i < no_tre; i++) {
 		if (free_buf) {
 			buf = free_buf;
@@ -79,7 +86,12 @@ static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
 {
 	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
 
-	if (mhi_res->transaction_status || !mhi_res->buf_addr) {
+	if (mhi_res->transaction_status == -ENOTCONN) {
+		kfree(mhi_res->buf_addr);
+		return;
+	} else if (mhi_res->transaction_status ||
+		   !mhi_res->buf_addr || !mhi_res->bytes_xferd) {
+		rmnet_ctl_log_err("RXE", mhi_res->transaction_status, NULL, 0);
 		ctl_dev->dev.stats.rx_err++;
 	} else {
 		ctl_dev->dev.stats.rx_pkts++;
@@ -98,7 +110,14 @@ static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
 	struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;
 
 	if (skb) {
-		ctl_dev->dev.stats.tx_complete++;
+		if (mhi_res->transaction_status) {
+			rmnet_ctl_log_err("TXE", mhi_res->transaction_status,
+					  skb->data, skb->len);
+			ctl_dev->dev.stats.tx_err++;
+		} else {
+			rmnet_ctl_log_debug("TXC", skb->data, skb->len);
+			ctl_dev->dev.stats.tx_complete++;
+		}
 		kfree_skb(skb);
 	}
 }
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 84ab642..2662285 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -265,7 +265,7 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 		batch_start += batches_processed;
 	}
 	total_delta = ktime_us_delta(ktime_get(), first_assign_ts);
-	trace_hyp_assign_end(total_delta, total_delta / i);
+	trace_hyp_assign_end(total_delta, div64_u64(total_delta, i));
 	kfree(sg_table_copy);
 	return ret;
 }
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 0a089cf..fe6683e 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -100,8 +100,15 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
 		unsigned nameoff, maxsize;
 
 		dentry_page = read_mapping_page(mapping, i, NULL);
-		if (IS_ERR(dentry_page))
-			continue;
+		if (dentry_page == ERR_PTR(-ENOMEM)) {
+			err = -ENOMEM;
+			break;
+		} else if (IS_ERR(dentry_page)) {
+			errln("fail to readdir of logical block %u of nid %llu",
+			      i, EROFS_V(dir)->nid);
+			err = PTR_ERR(dentry_page);
+			break;
+		}
 
 		lock_page(dentry_page);
 		de = (struct erofs_dirent *)kmap(dentry_page);
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index ad6fe6d..0f1558c6 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -311,7 +311,11 @@ z_erofs_vle_work_lookup(struct super_block *sb,
 	/* if multiref is disabled, `primary' is always true */
 	primary = true;
 
-	DBG_BUGON(work->pageofs != pageofs);
+	if (work->pageofs != pageofs) {
+		DBG_BUGON(1);
+		erofs_workgroup_put(egrp);
+		return ERR_PTR(-EIO);
+	}
 
 	/*
 	 * lock must be taken first to avoid grp->next == NIL between
@@ -853,6 +857,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 	for (i = 0; i < nr_pages; ++i)
 		pages[i] = NULL;
 
+	err = 0;
 	z_erofs_pagevec_ctor_init(&ctor,
 		Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
 
@@ -874,8 +879,17 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 			pagenr = z_erofs_onlinepage_index(page);
 
 		DBG_BUGON(pagenr >= nr_pages);
-		DBG_BUGON(pages[pagenr]);
 
+		/*
+		 * Currently EROFS doesn't support multiref (dedup),
+		 * so error out if a page is mapped more than once.
+		 */
+		if (pages[pagenr]) {
+			DBG_BUGON(1);
+			SetPageError(pages[pagenr]);
+			z_erofs_onlinepage_endio(pages[pagenr]);
+			err = -EIO;
+		}
 		pages[pagenr] = page;
 	}
 	sparsemem_pages = i;
@@ -885,7 +899,6 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 	overlapped = false;
 	compressed_pages = grp->compressed_pages;
 
-	err = 0;
 	for (i = 0; i < clusterpages; ++i) {
 		unsigned pagenr;
 
@@ -911,7 +924,12 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 			pagenr = z_erofs_onlinepage_index(page);
 
 			DBG_BUGON(pagenr >= nr_pages);
-			DBG_BUGON(pages[pagenr]);
+			if (pages[pagenr]) {
+				DBG_BUGON(1);
+				SetPageError(pages[pagenr]);
+				z_erofs_onlinepage_endio(pages[pagenr]);
+				err = -EIO;
+			}
 			++sparsemem_pages;
 			pages[pagenr] = page;
 
@@ -1335,19 +1353,18 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
 	err = z_erofs_do_read_page(&f, page, &pagepool);
 	(void)z_erofs_vle_work_iter_end(&f.builder);
 
-	if (err) {
-		errln("%s, failed to read, err [%d]", __func__, err);
-		goto out;
-	}
-
+	/* if some compressed clusters are ready, they need to be submitted anyway */
 	z_erofs_submit_and_unzip(&f, &pagepool, true);
-out:
+
+	if (err)
+		errln("%s, failed to read, err [%d]", __func__, err);
+
 	if (f.m_iter.mpage != NULL)
 		put_page(f.m_iter.mpage);
 
 	/* clean up the remaining free pages */
 	put_pages_list(&pagepool);
-	return 0;
+	return err;
 }
 
 static inline int __z_erofs_vle_normalaccess_readpages(
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index a2df02d..16fcf63 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -819,7 +819,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 	if (par->gamma.curves && gamma) {
 		if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
 					  strlen(gamma)))
-			goto alloc_fail;
+			goto release_framebuf;
 	}
 
 	/* Transmit buffer */
@@ -836,7 +836,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 	if (txbuflen > 0) {
 		txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
 		if (!txbuf)
-			goto alloc_fail;
+			goto release_framebuf;
 		par->txbuf.buf = txbuf;
 		par->txbuf.len = txbuflen;
 	}
@@ -872,6 +872,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 
 	return info;
 
+release_framebuf:
+	framebuffer_release(info);
+
 alloc_fail:
 	vfree(vmem);
 
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 607804a..76f434c 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1755,8 +1755,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
 
 	priv->hw->max_signal = 100;
 
-	if (vnt_init(priv))
+	if (vnt_init(priv)) {
+		device_free_info(priv);
 		return -ENODEV;
+	}
 
 	device_print_info(priv);
 	pci_set_drvdata(pcid, priv);
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index d4cf09b..095df24 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -476,10 +476,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
 	/* Set the encryption - we only support wep */
 	if (is_wep) {
 		if (sme->key) {
-			if (sme->key_idx >= NUM_WEPKEYS) {
-				err = -EINVAL;
-				goto exit;
-			}
+			if (sme->key_idx >= NUM_WEPKEYS)
+				return -EINVAL;
 
 			result = prism2_domibset_uint32(wlandev,
 				DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 108f8b1..8556510 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -577,7 +577,7 @@ static bool of_thermal_is_trips_triggered(struct thermal_zone_device *tz,
 	bool triggered = false;
 
 	mutex_lock(&tz->lock);
-	last_temp = tz->last_temperature;
+	last_temp = tz->temperature;
 	for (trip = 0; trip < data->ntrips; trip++) {
 
 		if (!tz->tzp->tracks_low) {
diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c
index 4526a96..ff1705a 100644
--- a/drivers/thermal/qcom/qmi_cooling.c
+++ b/drivers/thermal/qcom/qmi_cooling.c
@@ -218,6 +218,7 @@ static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev,
 			state, qmi_cdev->cdev_name, ret);
 		goto qmi_send_exit;
 	}
+	ret = 0;
 	pr_debug("Requested qmi state:%d for %s\n", state, qmi_cdev->cdev_name);
 
 qmi_send_exit:
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04ff104..cb384fc 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -306,7 +306,7 @@ static void thermal_zone_device_set_polling(struct workqueue_struct *queue,
 		mod_delayed_work(queue, &tz->poll_queue,
 				 msecs_to_jiffies(delay));
 	else
-		cancel_delayed_work(&tz->poll_queue);
+		cancel_delayed_work_sync(&tz->poll_queue);
 }
 
 static void monitor_thermal_zone(struct thermal_zone_device *tz)
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 40c69a5..dd5d8ee 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -87,13 +87,17 @@ static struct thermal_hwmon_device *
 thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
 {
 	struct thermal_hwmon_device *hwmon;
+	char type[THERMAL_NAME_LENGTH];
 
 	mutex_lock(&thermal_hwmon_list_lock);
-	list_for_each_entry(hwmon, &thermal_hwmon_list, node)
-		if (!strcmp(hwmon->type, tz->type)) {
+	list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
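+		/* hwmon->type stores the zone type with '-' replaced by '_', so compare against a sanitized copy */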
+		strcpy(type, tz->type);
+		strreplace(type, '-', '_');
+		if (!strcmp(hwmon->type, type)) {
 			mutex_unlock(&thermal_hwmon_list_lock);
 			return hwmon;
 		}
+	}
 	mutex_unlock(&thermal_hwmon_list_lock);
 
 	return NULL;
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 98d3ead..8df3058 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -837,7 +837,8 @@ static int __init ulite_init(void)
 static void __exit ulite_exit(void)
 {
 	platform_driver_unregister(&ulite_platform_driver);
-	uart_unregister_driver(&ulite_uart_driver);
+	if (ulite_uart_driver.state)
+		uart_unregister_driver(&ulite_uart_driver);
 }
 
 module_init(ulite_init);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index ac8025c..b4838ab 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1460,6 +1460,10 @@ static void release_one_tty(struct work_struct *work)
 
 	put_pid(tty->pgrp);
 	put_pid(tty->session);
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	if (tty->echo_delayed_work.work.func)
+		cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
 	free_tty_struct(tty);
 }
 
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 407a7a6..4a80103 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
 	kfree(usblp->readbuf);
 	kfree(usblp->device_id_string);
 	kfree(usblp->statusbuf);
+	usb_put_intf(usblp->intf);
 	kfree(usblp);
 }
 
@@ -461,10 +462,12 @@ static int usblp_release(struct inode *inode, struct file *file)
 
 	mutex_lock(&usblp_mutex);
 	usblp->used = 0;
-	if (usblp->present) {
+	if (usblp->present)
 		usblp_unlink_urbs(usblp);
-		usb_autopm_put_interface(usblp->intf);
-	} else		/* finish cleanup from disconnect */
+
+	usb_autopm_put_interface(usblp->intf);
+
+	if (!usblp->present)		/* finish cleanup from disconnect */
 		usblp_cleanup(usblp);
 	mutex_unlock(&usblp_mutex);
 	return 0;
@@ -1105,7 +1108,7 @@ static int usblp_probe(struct usb_interface *intf,
 	init_waitqueue_head(&usblp->wwait);
 	init_usb_anchor(&usblp->urbs);
 	usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-	usblp->intf = intf;
+	usblp->intf = usb_get_intf(intf);
 
 	/* Malloc device ID string buffer to the largest expected length,
 	 * since we can re-query it on an ioctl and a dynamic string
@@ -1194,6 +1197,7 @@ static int usblp_probe(struct usb_interface *intf,
 	kfree(usblp->readbuf);
 	kfree(usblp->statusbuf);
 	kfree(usblp->device_id_string);
+	usb_put_intf(usblp->intf);
 	kfree(usblp);
 abort_ret:
 	return retval;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 38a982f..d93b56b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3193,8 +3193,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 {
 	u32			reg;
 
-	usb_phy_start_link_training(dwc->usb3_phy);
-
 	dwc->connected = true;
 
 	/*
@@ -3280,7 +3278,6 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 	u8			speed;
 
 	dbg_event(0xFF, "CONNECT DONE", 0);
-	usb_phy_stop_link_training(dwc->usb3_phy);
 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 	speed = reg & DWC3_DSTS_CONNECTSPD;
 	dwc->speed = speed;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 213b525..1505e55 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -48,6 +48,7 @@
 #define DRIVER_VERSION	"02 May 2005"
 
 #define POWER_BUDGET	500	/* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3	900	/* in mA */
 
 static const char	driver_name[] = "dummy_hcd";
 static const char	driver_desc[] = "USB Host+Gadget Emulator";
@@ -2446,7 +2447,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
 	dum_hcd->rh_state = DUMMY_RH_RUNNING;
 	dum_hcd->stream_en_ep = 0;
 	INIT_LIST_HEAD(&dum_hcd->urbp_list);
-	dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+	dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
 	dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
 	dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
 #ifdef CONFIG_USB_OTG
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index eafc2a0..21921db 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1165,11 +1165,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
 			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
 
 			bl = bytes - n;
-			if (bl > 3)
-				bl = 3;
+			if (bl > 4)
+				bl = 4;
 
 			for (i = 0; i < bl; i++)
-				data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+				data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
 		}
 		break;
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 81479df..edef3e2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3154,10 +3154,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 	if (usb_urb_dir_out(urb)) {
 		len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
 				   seg->bounce_buf, new_buff_len, enqd_len);
-		if (len != seg->bounce_len)
+		if (len != new_buff_len)
 			xhci_warn(xhci,
 				"WARN Wrong bounce buffer write length: %zu != %d\n",
-				len, seg->bounce_len);
+				len, new_buff_len);
 		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
 						 max_pkt, DMA_TO_DEVICE);
 	} else {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 4e08480..9f7e540 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1051,7 +1051,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 	writel(command, &xhci->op_regs->command);
 	xhci->broken_suspend = 0;
 	if (xhci_handshake(&xhci->op_regs->status,
-				STS_SAVE, 0, 10 * 1000)) {
+				STS_SAVE, 0, 20 * 1000)) {
 	/*
 	 * AMD SNPS xHC 3.0 occasionally does not clear the
 	 * SSS bit of USBSTS and when driver tries to poll
@@ -1127,6 +1127,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		hibernated = true;
 
 	if (!hibernated) {
+		/*
+		 * Some controllers might lose power during suspend, so wait
+		 * for the controller-not-ready bit to clear, just as in xHC init.
+		 */
+		retval = xhci_handshake(&xhci->op_regs->status,
+					STS_CNR, 0, 10 * 1000 * 1000);
+		if (retval) {
+			xhci_warn(xhci, "Controller not ready at resume %d\n",
+				  retval);
+			spin_unlock_irq(&xhci->lock);
+			return retval;
+		}
 		/* step 1: restore register */
 		xhci_restore_registers(xhci);
 		/* step 2: initialize command ring buffer */
@@ -3082,6 +3094,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	unsigned int ep_index;
 	unsigned long flags;
 	u32 ep_flag;
+	int err;
 
 	xhci = hcd_to_xhci(hcd);
 	if (!host_ep->hcpriv)
@@ -3131,7 +3144,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 		xhci_free_command(xhci, cfg_cmd);
 		goto cleanup;
 	}
-	xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+
+	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
+					ep_index, 0);
+	if (err < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
+		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
+				__func__, err);
+		goto cleanup;
+	}
+
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -3145,8 +3168,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 					   ctrl_ctx, ep_flag, ep_flag);
 	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
 
-	xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+	err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
 				      udev->slot_id, false);
+	if (err < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
+		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
+				__func__, err);
+		goto cleanup;
+	}
+
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -4660,12 +4691,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
 	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
 		desc, state, timeout);
 
-	/* If we found we can't enable hub-initiated LPM, or
+	/* If we found we can't enable hub-initiated LPM, and
 	 * the U1 or U2 exit latency was too high to allow
-	 * device-initiated LPM as well, just stop searching.
+	 * device-initiated LPM as well, then we will disable LPM
+	 * for this device, so stop searching any further.
 	 */
-	if (alt_timeout == USB3_LPM_DISABLED ||
-			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+	if (alt_timeout == USB3_LPM_DISABLED) {
 		*timeout = alt_timeout;
 		return -E2BIG;
 	}
@@ -4776,10 +4807,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
 		if (intf->dev.driver) {
 			driver = to_usb_driver(intf->dev.driver);
 			if (driver && driver->disable_hub_initiated_lpm) {
-				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
-						"at request of driver %s\n",
-						state_name, driver->name);
-				return xhci_get_timeout_no_hub_lpm(udev, state);
+				dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+					state_name, driver->name);
+				timeout = xhci_get_timeout_no_hub_lpm(udev,
+								      state);
+				if (timeout == USB3_LPM_DISABLED)
+					return timeout;
 			}
 		}
 
@@ -5063,11 +5096,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 		hcd->has_tt = 1;
 	} else {
 		/*
-		 * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
-		 * minor revision instead of sbrn. Minor revision is a two digit
-		 * BCD containing minor and sub-minor numbers, only show minor.
+		 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+		 * should return 0x31 for sbrn, or that the minor revision
+		 * is a two-digit BCD containing minor and sub-minor numbers.
+		 * This was later clarified in xHCI 1.2.
+		 *
+		 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+		 * minor revision set to 0x1 instead of 0x10.
 		 */
-		minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+		if (xhci->usb3_rhub.min_rev == 0x1)
+			minor_rev = 1;
+		else
+			minor_rev = xhci->usb3_rhub.min_rev / 0x10;
 
 		switch (minor_rev) {
 		case 2:
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 9f2f563..addbb47 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -721,6 +721,10 @@ static int mts_usb_probe(struct usb_interface *intf,
 
 	}
 
+	if (ep_in_current != &ep_in_set[2]) {
+		MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
+		return -ENODEV;
+	}
 
 	if ( ep_out == -1 ) {
 		MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index bbd0bee..5ebb23e 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -46,16 +46,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called usbsevseg.
 
-config USB_RIO500
-	tristate "USB Diamond Rio500 support"
-	help
-	  Say Y here if you want to connect a USB Rio500 mp3 player to your
-	  computer's USB port. Please read <file:Documentation/usb/rio.txt>
-	  for more information.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called rio500.
-
 config USB_LEGOTOWER
 	tristate "USB Lego Infrared Tower support"
 	help
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 4f7d202..6eb379b 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -17,7 +17,6 @@
 obj-$(CONFIG_USB_LCD)			+= usblcd.o
 obj-$(CONFIG_USB_LD)			+= ldusb.o
 obj-$(CONFIG_USB_LEGOTOWER)		+= legousbtower.o
-obj-$(CONFIG_USB_RIO500)		+= rio500.o
 obj-$(CONFIG_USB_TEST)			+= usbtest.o
 obj-$(CONFIG_USB_EHSET_TEST_FIXTURE)    += ehset.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)	+= trancevibrator.o
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 9465fb9..9a51760 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -75,6 +75,7 @@ struct adu_device {
 	char			serial_number[8];
 
 	int			open_count; /* number of times this port has been opened */
+	unsigned long		disconnected:1;
 
 	char		*read_buffer_primary;
 	int			read_buffer_length;
@@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev)
 {
 	unsigned long flags;
 
-	if (dev->udev == NULL)
+	if (dev->disconnected)
 		return;
 
 	/* shutdown transfer */
@@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev)
 	kfree(dev->read_buffer_secondary);
 	kfree(dev->interrupt_in_buffer);
 	kfree(dev->interrupt_out_buffer);
+	usb_put_dev(dev->udev);
 	kfree(dev);
 }
 
@@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file)
 	}
 
 	dev = usb_get_intfdata(interface);
-	if (!dev || !dev->udev) {
+	if (!dev) {
 		retval = -ENODEV;
 		goto exit_no_device;
 	}
@@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file)
 	}
 
 	adu_release_internal(dev);
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		/* the device was unplugged before the file was released */
 		if (!dev->open_count)	/* ... and we're the last user */
 			adu_delete(dev);
@@ -355,7 +357,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
 		return -ERESTARTSYS;
 
 	/* verify that the device wasn't unplugged */
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		pr_err("No device or device unplugged %d\n", retval);
 		goto exit;
@@ -520,7 +522,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
 		goto exit_nolock;
 
 	/* verify that the device wasn't unplugged */
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		pr_err("No device or device unplugged %d\n", retval);
 		goto exit;
@@ -665,7 +667,7 @@ static int adu_probe(struct usb_interface *interface,
 
 	mutex_init(&dev->mtx);
 	spin_lock_init(&dev->buflock);
-	dev->udev = udev;
+	dev->udev = usb_get_dev(udev);
 	init_waitqueue_head(&dev->read_wait);
 	init_waitqueue_head(&dev->write_wait);
 
@@ -764,14 +766,18 @@ static void adu_disconnect(struct usb_interface *interface)
 
 	dev = usb_get_intfdata(interface);
 
-	mutex_lock(&dev->mtx);	/* not interruptible */
-	dev->udev = NULL;	/* poison */
 	usb_deregister_dev(interface, &adu_class);
-	mutex_unlock(&dev->mtx);
+
+	usb_poison_urb(dev->interrupt_in_urb);
+	usb_poison_urb(dev->interrupt_out_urb);
 
 	mutex_lock(&adutux_mutex);
 	usb_set_intfdata(interface, NULL);
 
+	mutex_lock(&dev->mtx);	/* not interruptible */
+	dev->disconnected = 1;
+	mutex_unlock(&dev->mtx);
+
 	/* if the device is not opened, then we clean up right now */
 	if (!dev->open_count)
 		adu_delete(dev);
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index cf5828c..34e6cd6 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev)
 		usb_free_urb(dev->urb);
 		kfree(dev->name);
 		kfree(dev->buf);
+		usb_put_intf(dev->interface);
 		kfree(dev);
 	}
 }
@@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface,
 	if (dev == NULL)
 		goto out;
 
+	dev->interface = usb_get_intf(interface);
+
 	dev->buf = kmalloc(size, GFP_KERNEL);
 
 	if (dev->buf == NULL)
@@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface,
 			goto out;
 	}
 
-	dev->interface = interface;
-
 	dev->in_ep = in_ep;
 
 	if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 55db0fc..2d9d949 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -87,6 +87,7 @@ struct iowarrior {
 	char chip_serial[9];		/* the serial number string of the chip connected */
 	int report_size;		/* number of bytes in a report */
 	u16 product_id;
+	struct usb_anchor submitted;
 };
 
 /*--------------*/
@@ -243,6 +244,7 @@ static inline void iowarrior_delete(struct iowarrior *dev)
 	kfree(dev->int_in_buffer);
 	usb_free_urb(dev->int_in_urb);
 	kfree(dev->read_queue);
+	usb_put_intf(dev->interface);
 	kfree(dev);
 }
 
@@ -424,11 +426,13 @@ static ssize_t iowarrior_write(struct file *file,
 			retval = -EFAULT;
 			goto error;
 		}
+		usb_anchor_urb(int_out_urb, &dev->submitted);
 		retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
 		if (retval) {
 			dev_dbg(&dev->interface->dev,
 				"submit error %d for urb nr.%d\n",
 				retval, atomic_read(&dev->write_busy));
+			usb_unanchor_urb(int_out_urb);
 			goto error;
 		}
 		/* submit was ok */
@@ -764,11 +768,13 @@ static int iowarrior_probe(struct usb_interface *interface,
 	init_waitqueue_head(&dev->write_wait);
 
 	dev->udev = udev;
-	dev->interface = interface;
+	dev->interface = usb_get_intf(interface);
 
 	iface_desc = interface->cur_altsetting;
 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
+	init_usb_anchor(&dev->submitted);
+
 	res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
 	if (res) {
 		dev_err(&interface->dev, "no interrupt-in endpoint found\n");
@@ -866,8 +872,6 @@ static void iowarrior_disconnect(struct usb_interface *interface)
 	dev = usb_get_intfdata(interface);
 	mutex_lock(&iowarrior_open_disc_lock);
 	usb_set_intfdata(interface, NULL);
-	/* prevent device read, write and ioctl */
-	dev->present = 0;
 
 	minor = dev->minor;
 	mutex_unlock(&iowarrior_open_disc_lock);
@@ -878,8 +882,7 @@ static void iowarrior_disconnect(struct usb_interface *interface)
 	mutex_lock(&dev->mutex);
 
 	/* prevent device read, write and ioctl */
-
-	mutex_unlock(&dev->mutex);
+	dev->present = 0;
 
 	if (dev->opened) {
 		/* There is a process that holds a filedescriptor to the device ,
@@ -887,10 +890,13 @@ static void iowarrior_disconnect(struct usb_interface *interface)
 		   Deleting the device is postponed until close() was called.
 		 */
 		usb_kill_urb(dev->int_in_urb);
+		usb_kill_anchored_urbs(&dev->submitted);
 		wake_up_interruptible(&dev->read_wait);
 		wake_up_interruptible(&dev->write_wait);
+		mutex_unlock(&dev->mutex);
 	} else {
 		/* no process is using the device, cleanup now */
+		mutex_unlock(&dev->mutex);
 		iowarrior_delete(dev);
 	}
 
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 006762b..6b3a6fd 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
 struct ld_usb {
 	struct mutex		mutex;		/* locks this structure */
 	struct usb_interface	*intf;		/* save off the usb interface pointer */
+	unsigned long		disconnected:1;
 
 	int			open_count;	/* number of times this port has been opened */
 
@@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
 	/* shutdown transfer */
 	if (dev->interrupt_in_running) {
 		dev->interrupt_in_running = 0;
-		if (dev->intf)
-			usb_kill_urb(dev->interrupt_in_urb);
+		usb_kill_urb(dev->interrupt_in_urb);
 	}
 	if (dev->interrupt_out_busy)
-		if (dev->intf)
-			usb_kill_urb(dev->interrupt_out_urb);
+		usb_kill_urb(dev->interrupt_out_urb);
 }
 
 /**
@@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
  */
 static void ld_usb_delete(struct ld_usb *dev)
 {
-	ld_usb_abort_transfers(dev);
-
 	/* free data structures */
 	usb_free_urb(dev->interrupt_in_urb);
 	usb_free_urb(dev->interrupt_out_urb);
@@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
 
 resubmit:
 	/* resubmit if we're still running */
-	if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) {
+	if (dev->interrupt_in_running && !dev->buffer_overflow) {
 		retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
 		if (retval) {
 			dev_err(&dev->intf->dev,
@@ -383,16 +380,13 @@ static int ld_usb_release(struct inode *inode, struct file *file)
 		goto exit;
 	}
 
-	if (mutex_lock_interruptible(&dev->mutex)) {
-		retval = -ERESTARTSYS;
-		goto exit;
-	}
+	mutex_lock(&dev->mutex);
 
 	if (dev->open_count != 1) {
 		retval = -ENODEV;
 		goto unlock_exit;
 	}
-	if (dev->intf == NULL) {
+	if (dev->disconnected) {
 		/* the device was unplugged before the file was released */
 		mutex_unlock(&dev->mutex);
 		/* unlock here as ld_usb_delete frees dev */
@@ -423,7 +417,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
 
 	dev = file->private_data;
 
-	if (!dev->intf)
+	if (dev->disconnected)
 		return EPOLLERR | EPOLLHUP;
 
 	poll_wait(file, &dev->read_wait, wait);
@@ -462,7 +456,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 	}
 
 	/* verify that the device wasn't unplugged */
-	if (dev->intf == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
 		goto unlock_exit;
@@ -470,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 
 	/* wait for data */
 	spin_lock_irq(&dev->rbsl);
-	if (dev->ring_head == dev->ring_tail) {
+	while (dev->ring_head == dev->ring_tail) {
 		dev->interrupt_in_done = 0;
 		spin_unlock_irq(&dev->rbsl);
 		if (file->f_flags & O_NONBLOCK) {
@@ -480,12 +474,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 		retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
 		if (retval < 0)
 			goto unlock_exit;
-	} else {
-		spin_unlock_irq(&dev->rbsl);
+
+		spin_lock_irq(&dev->rbsl);
 	}
+	spin_unlock_irq(&dev->rbsl);
 
 	/* actual_buffer contains actual_length + interrupt_in_buffer */
 	actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
+	if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+		retval = -EIO;
+		goto unlock_exit;
+	}
 	bytes_to_read = min(count, *actual_buffer);
 	if (bytes_to_read < *actual_buffer)
 		dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
@@ -542,7 +541,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
 	}
 
 	/* verify that the device wasn't unplugged */
-	if (dev->intf == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
 		goto unlock_exit;
@@ -696,10 +695,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
 		dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
 
 	dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
-	dev->ring_buffer =
-		kmalloc_array(ring_buffer_size,
-			      sizeof(size_t) + dev->interrupt_in_endpoint_size,
-			      GFP_KERNEL);
+	dev->ring_buffer = kcalloc(ring_buffer_size,
+			sizeof(size_t) + dev->interrupt_in_endpoint_size,
+			GFP_KERNEL);
 	if (!dev->ring_buffer)
 		goto error;
 	dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
@@ -764,6 +762,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
 	/* give back our minor */
 	usb_deregister_dev(intf, &ld_usb_class);
 
+	usb_poison_urb(dev->interrupt_in_urb);
+	usb_poison_urb(dev->interrupt_out_urb);
+
 	mutex_lock(&dev->mutex);
 
 	/* if the device is not opened, then we clean up right now */
@@ -771,7 +772,7 @@ static void ld_usb_disconnect(struct usb_interface *intf)
 		mutex_unlock(&dev->mutex);
 		ld_usb_delete(dev);
 	} else {
-		dev->intf = NULL;
+		dev->disconnected = 1;
 		/* wake up pollers */
 		wake_up_interruptible_all(&dev->read_wait);
 		wake_up_interruptible_all(&dev->write_wait);
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 006cf13..62dab24 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = {
 };
 
 MODULE_DEVICE_TABLE (usb, tower_table);
-static DEFINE_MUTEX(open_disc_mutex);
 
 #define LEGO_USB_TOWER_MINOR_BASE	160
 
@@ -191,6 +190,7 @@ struct lego_usb_tower {
 	unsigned char		minor;		/* the starting minor number for this device */
 
 	int			open_count;	/* number of times this port has been opened */
+	unsigned long		disconnected:1;
 
 	char*			read_buffer;
 	size_t			read_buffer_length; /* this much came in */
@@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
  */
 static inline void tower_delete (struct lego_usb_tower *dev)
 {
-	tower_abort_transfers (dev);
-
 	/* free data structures */
 	usb_free_urb(dev->interrupt_in_urb);
 	usb_free_urb(dev->interrupt_out_urb);
 	kfree (dev->read_buffer);
 	kfree (dev->interrupt_in_buffer);
 	kfree (dev->interrupt_out_buffer);
+	usb_put_dev(dev->udev);
 	kfree (dev);
 }
 
@@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file)
 		goto exit;
 	}
 
-	mutex_lock(&open_disc_mutex);
 	dev = usb_get_intfdata(interface);
-
 	if (!dev) {
-		mutex_unlock(&open_disc_mutex);
 		retval = -ENODEV;
 		goto exit;
 	}
 
 	/* lock this device */
 	if (mutex_lock_interruptible(&dev->lock)) {
-		mutex_unlock(&open_disc_mutex);
 	        retval = -ERESTARTSYS;
 		goto exit;
 	}
@@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file)
 
 	/* allow opening only once */
 	if (dev->open_count) {
-		mutex_unlock(&open_disc_mutex);
 		retval = -EBUSY;
 		goto unlock_exit;
 	}
-	dev->open_count = 1;
-	mutex_unlock(&open_disc_mutex);
 
 	/* reset the tower */
 	result = usb_control_msg (dev->udev,
@@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file)
 		dev_err(&dev->udev->dev,
 			"Couldn't submit interrupt_in_urb %d\n", retval);
 		dev->interrupt_in_running = 0;
-		dev->open_count = 0;
 		goto unlock_exit;
 	}
 
 	/* save device in the file's private structure */
 	file->private_data = dev;
 
+	dev->open_count = 1;
+
 unlock_exit:
 	mutex_unlock(&dev->lock);
 
@@ -423,22 +416,19 @@ static int tower_release (struct inode *inode, struct file *file)
 
 	if (dev == NULL) {
 		retval = -ENODEV;
-		goto exit_nolock;
-	}
-
-	mutex_lock(&open_disc_mutex);
-	if (mutex_lock_interruptible(&dev->lock)) {
-	        retval = -ERESTARTSYS;
 		goto exit;
 	}
 
+	mutex_lock(&dev->lock);
+
 	if (dev->open_count != 1) {
 		dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
 			__func__);
 		retval = -ENODEV;
 		goto unlock_exit;
 	}
-	if (dev->udev == NULL) {
+
+	if (dev->disconnected) {
 		/* the device was unplugged before the file was released */
 
 		/* unlock here as tower_delete frees dev */
@@ -456,10 +446,7 @@ static int tower_release (struct inode *inode, struct file *file)
 
 unlock_exit:
 	mutex_unlock(&dev->lock);
-
 exit:
-	mutex_unlock(&open_disc_mutex);
-exit_nolock:
 	return retval;
 }
 
@@ -477,10 +464,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
 	if (dev->interrupt_in_running) {
 		dev->interrupt_in_running = 0;
 		mb();
-		if (dev->udev)
-			usb_kill_urb (dev->interrupt_in_urb);
+		usb_kill_urb(dev->interrupt_in_urb);
 	}
-	if (dev->interrupt_out_busy && dev->udev)
+	if (dev->interrupt_out_busy)
 		usb_kill_urb(dev->interrupt_out_urb);
 }
 
@@ -516,7 +502,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
 
 	dev = file->private_data;
 
-	if (!dev->udev)
+	if (dev->disconnected)
 		return EPOLLERR | EPOLLHUP;
 
 	poll_wait(file, &dev->read_wait, wait);
@@ -563,7 +549,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
 	}
 
 	/* verify that the device wasn't unplugged */
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		pr_err("No device or device unplugged %d\n", retval);
 		goto unlock_exit;
@@ -649,7 +635,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
 	}
 
 	/* verify that the device wasn't unplugged */
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		pr_err("No device or device unplugged %d\n", retval);
 		goto unlock_exit;
@@ -759,7 +745,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
 
 resubmit:
 	/* resubmit if we're still running */
-	if (dev->interrupt_in_running && dev->udev) {
+	if (dev->interrupt_in_running) {
 		retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
 		if (retval)
 			dev_err(&dev->udev->dev,
@@ -822,8 +808,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 
 	mutex_init(&dev->lock);
 
-	dev->udev = udev;
+	dev->udev = usb_get_dev(udev);
 	dev->open_count = 0;
+	dev->disconnected = 0;
 
 	dev->read_buffer = NULL;
 	dev->read_buffer_length = 0;
@@ -891,8 +878,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 				  get_version_reply,
 				  sizeof(*get_version_reply),
 				  1000);
-	if (result < 0) {
-		dev_err(idev, "LEGO USB Tower get version control request failed\n");
+	if (result < sizeof(*get_version_reply)) {
+		if (result >= 0)
+			result = -EIO;
+		dev_err(idev, "get version request failed: %d\n", result);
 		retval = result;
 		goto error;
 	}
@@ -910,7 +899,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 	if (retval) {
 		/* something prevented us from registering this driver */
 		dev_err(idev, "Not able to get a minor for this device.\n");
-		usb_set_intfdata (interface, NULL);
 		goto error;
 	}
 	dev->minor = interface->minor;
@@ -942,23 +930,24 @@ static void tower_disconnect (struct usb_interface *interface)
 	int minor;
 
 	dev = usb_get_intfdata (interface);
-	mutex_lock(&open_disc_mutex);
-	usb_set_intfdata (interface, NULL);
 
 	minor = dev->minor;
 
-	/* give back our minor */
+	/* give back our minor and prevent further open() */
 	usb_deregister_dev (interface, &tower_class);
 
+	/* stop I/O */
+	usb_poison_urb(dev->interrupt_in_urb);
+	usb_poison_urb(dev->interrupt_out_urb);
+
 	mutex_lock(&dev->lock);
-	mutex_unlock(&open_disc_mutex);
 
 	/* if the device is not opened, then we clean up right now */
 	if (!dev->open_count) {
 		mutex_unlock(&dev->lock);
 		tower_delete (dev);
 	} else {
-		dev->udev = NULL;
+		dev->disconnected = 1;
 		/* wake up pollers */
 		wake_up_interruptible_all(&dev->read_wait);
 		wake_up_interruptible_all(&dev->write_wait);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
deleted file mode 100644
index a32d61a..0000000
--- a/drivers/usb/misc/rio500.c
+++ /dev/null
@@ -1,561 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* -*- linux-c -*- */
-
-/* 
- * Driver for USB Rio 500
- *
- * Cesar Miquel (miquel@df.uba.ar)
- * 
- * based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
- *
- * Changelog:
- * 30/05/2003  replaced lock/unlock kernel with up/down
- *             Daniele Bellucci  bellucda@tiscali.it
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched/signal.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-
-#include "rio500_usb.h"
-
-#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
-#define DRIVER_DESC "USB Rio 500 driver"
-
-#define RIO_MINOR	64
-
-/* stall/wait timeout for rio */
-#define NAK_TIMEOUT (HZ)
-
-#define IBUF_SIZE 0x1000
-
-/* Size of the rio buffer */
-#define OBUF_SIZE 0x10000
-
-struct rio_usb_data {
-        struct usb_device *rio_dev;     /* init: probe_rio */
-        unsigned int ifnum;             /* Interface number of the USB device */
-        int isopen;                     /* nz if open */
-        int present;                    /* Device is present on the bus */
-        char *obuf, *ibuf;              /* transfer buffers */
-        char bulk_in_ep, bulk_out_ep;   /* Endpoint assignments */
-        wait_queue_head_t wait_q;       /* for timeouts */
-	struct mutex lock;          /* general race avoidance */
-};
-
-static DEFINE_MUTEX(rio500_mutex);
-static struct rio_usb_data rio_instance;
-
-static int open_rio(struct inode *inode, struct file *file)
-{
-	struct rio_usb_data *rio = &rio_instance;
-
-	/* against disconnect() */
-	mutex_lock(&rio500_mutex);
-	mutex_lock(&(rio->lock));
-
-	if (rio->isopen || !rio->present) {
-		mutex_unlock(&(rio->lock));
-		mutex_unlock(&rio500_mutex);
-		return -EBUSY;
-	}
-	rio->isopen = 1;
-
-	init_waitqueue_head(&rio->wait_q);
-
-	mutex_unlock(&(rio->lock));
-
-	dev_info(&rio->rio_dev->dev, "Rio opened.\n");
-	mutex_unlock(&rio500_mutex);
-
-	return 0;
-}
-
-static int close_rio(struct inode *inode, struct file *file)
-{
-	struct rio_usb_data *rio = &rio_instance;
-
-	/* against disconnect() */
-	mutex_lock(&rio500_mutex);
-	mutex_lock(&(rio->lock));
-
-	rio->isopen = 0;
-	if (!rio->present) {
-		/* cleanup has been delayed */
-		kfree(rio->ibuf);
-		kfree(rio->obuf);
-		rio->ibuf = NULL;
-		rio->obuf = NULL;
-	} else {
-		dev_info(&rio->rio_dev->dev, "Rio closed.\n");
-	}
-	mutex_unlock(&(rio->lock));
-	mutex_unlock(&rio500_mutex);
-	return 0;
-}
-
-static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct RioCommand rio_cmd;
-	struct rio_usb_data *rio = &rio_instance;
-	void __user *data;
-	unsigned char *buffer;
-	int result, requesttype;
-	int retries;
-	int retval=0;
-
-	mutex_lock(&(rio->lock));
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-		retval = -ENODEV;
-		goto err_out;
-	}
-
-	switch (cmd) {
-	case RIO_RECV_COMMAND:
-		data = (void __user *) arg;
-		if (data == NULL)
-			break;
-		if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-			retval = -EFAULT;
-			goto err_out;
-		}
-		if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-			retval = -EINVAL;
-			goto err_out;
-		}
-		buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-		if (buffer == NULL) {
-			retval = -ENOMEM;
-			goto err_out;
-		}
-		if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-			retval = -EFAULT;
-			free_page((unsigned long) buffer);
-			goto err_out;
-		}
-
-		requesttype = rio_cmd.requesttype | USB_DIR_IN |
-		    USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-		dev_dbg(&rio->rio_dev->dev,
-			"sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-			requesttype, rio_cmd.request, rio_cmd.value,
-			rio_cmd.index, rio_cmd.length);
-		/* Send rio control message */
-		retries = 3;
-		while (retries) {
-			result = usb_control_msg(rio->rio_dev,
-						 usb_rcvctrlpipe(rio-> rio_dev, 0),
-						 rio_cmd.request,
-						 requesttype,
-						 rio_cmd.value,
-						 rio_cmd.index, buffer,
-						 rio_cmd.length,
-						 jiffies_to_msecs(rio_cmd.timeout));
-			if (result == -ETIMEDOUT)
-				retries--;
-			else if (result < 0) {
-				dev_err(&rio->rio_dev->dev,
-					"Error executing ioctrl. code = %d\n",
-					result);
-				retries = 0;
-			} else {
-				dev_dbg(&rio->rio_dev->dev,
-					"Executed ioctl. Result = %d (data=%02x)\n",
-					result, buffer[0]);
-				if (copy_to_user(rio_cmd.buffer, buffer,
-						 rio_cmd.length)) {
-					free_page((unsigned long) buffer);
-					retval = -EFAULT;
-					goto err_out;
-				}
-				retries = 0;
-			}
-
-			/* rio_cmd.buffer contains a raw stream of single byte
-			   data which has been returned from rio.  Data is
-			   interpreted at application level.  For data that
-			   will be cast to data types longer than 1 byte, data
-			   will be little_endian and will potentially need to
-			   be swapped at the app level */
-
-		}
-		free_page((unsigned long) buffer);
-		break;
-
-	case RIO_SEND_COMMAND:
-		data = (void __user *) arg;
-		if (data == NULL)
-			break;
-		if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-			retval = -EFAULT;
-			goto err_out;
-		}
-		if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-			retval = -EINVAL;
-			goto err_out;
-		}
-		buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-		if (buffer == NULL) {
-			retval = -ENOMEM;
-			goto err_out;
-		}
-		if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-			free_page((unsigned long)buffer);
-			retval = -EFAULT;
-			goto err_out;
-		}
-
-		requesttype = rio_cmd.requesttype | USB_DIR_OUT |
-		    USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-		dev_dbg(&rio->rio_dev->dev,
-			"sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-			requesttype, rio_cmd.request, rio_cmd.value,
-			rio_cmd.index, rio_cmd.length);
-		/* Send rio control message */
-		retries = 3;
-		while (retries) {
-			result = usb_control_msg(rio->rio_dev,
-						 usb_sndctrlpipe(rio-> rio_dev, 0),
-						 rio_cmd.request,
-						 requesttype,
-						 rio_cmd.value,
-						 rio_cmd.index, buffer,
-						 rio_cmd.length,
-						 jiffies_to_msecs(rio_cmd.timeout));
-			if (result == -ETIMEDOUT)
-				retries--;
-			else if (result < 0) {
-				dev_err(&rio->rio_dev->dev,
-					"Error executing ioctrl. code = %d\n",
-					result);
-				retries = 0;
-			} else {
-				dev_dbg(&rio->rio_dev->dev,
-					"Executed ioctl. Result = %d\n", result);
-				retries = 0;
-
-			}
-
-		}
-		free_page((unsigned long) buffer);
-		break;
-
-	default:
-		retval = -ENOTTY;
-		break;
-	}
-
-
-err_out:
-	mutex_unlock(&(rio->lock));
-	return retval;
-}
-
-static ssize_t
-write_rio(struct file *file, const char __user *buffer,
-	  size_t count, loff_t * ppos)
-{
-	DEFINE_WAIT(wait);
-	struct rio_usb_data *rio = &rio_instance;
-
-	unsigned long copy_size;
-	unsigned long bytes_written = 0;
-	unsigned int partial;
-
-	int result = 0;
-	int maxretry;
-	int errn = 0;
-	int intr;
-
-	intr = mutex_lock_interruptible(&(rio->lock));
-	if (intr)
-		return -EINTR;
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-		mutex_unlock(&(rio->lock));
-		return -ENODEV;
-	}
-
-
-
-	do {
-		unsigned long thistime;
-		char *obuf = rio->obuf;
-
-		thistime = copy_size =
-		    (count >= OBUF_SIZE) ? OBUF_SIZE : count;
-		if (copy_from_user(rio->obuf, buffer, copy_size)) {
-			errn = -EFAULT;
-			goto error;
-		}
-		maxretry = 5;
-		while (thistime) {
-			if (!rio->rio_dev) {
-				errn = -ENODEV;
-				goto error;
-			}
-			if (signal_pending(current)) {
-				mutex_unlock(&(rio->lock));
-				return bytes_written ? bytes_written : -EINTR;
-			}
-
-			result = usb_bulk_msg(rio->rio_dev,
-					 usb_sndbulkpipe(rio->rio_dev, 2),
-					 obuf, thistime, &partial, 5000);
-
-			dev_dbg(&rio->rio_dev->dev,
-				"write stats: result:%d thistime:%lu partial:%u\n",
-				result, thistime, partial);
-
-			if (result == -ETIMEDOUT) {	/* NAK - so hold for a while */
-				if (!maxretry--) {
-					errn = -ETIME;
-					goto error;
-				}
-				prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-				schedule_timeout(NAK_TIMEOUT);
-				finish_wait(&rio->wait_q, &wait);
-				continue;
-			} else if (!result && partial) {
-				obuf += partial;
-				thistime -= partial;
-			} else
-				break;
-		}
-		if (result) {
-			dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n",
-				result);
-			errn = -EIO;
-			goto error;
-		}
-		bytes_written += copy_size;
-		count -= copy_size;
-		buffer += copy_size;
-	} while (count > 0);
-
-	mutex_unlock(&(rio->lock));
-
-	return bytes_written ? bytes_written : -EIO;
-
-error:
-	mutex_unlock(&(rio->lock));
-	return errn;
-}
-
-static ssize_t
-read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
-{
-	DEFINE_WAIT(wait);
-	struct rio_usb_data *rio = &rio_instance;
-	ssize_t read_count;
-	unsigned int partial;
-	int this_read;
-	int result;
-	int maxretry = 10;
-	char *ibuf;
-	int intr;
-
-	intr = mutex_lock_interruptible(&(rio->lock));
-	if (intr)
-		return -EINTR;
-	/* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-		mutex_unlock(&(rio->lock));
-		return -ENODEV;
-	}
-
-	ibuf = rio->ibuf;
-
-	read_count = 0;
-
-
-	while (count > 0) {
-		if (signal_pending(current)) {
-			mutex_unlock(&(rio->lock));
-			return read_count ? read_count : -EINTR;
-		}
-		if (!rio->rio_dev) {
-			mutex_unlock(&(rio->lock));
-			return -ENODEV;
-		}
-		this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
-
-		result = usb_bulk_msg(rio->rio_dev,
-				      usb_rcvbulkpipe(rio->rio_dev, 1),
-				      ibuf, this_read, &partial,
-				      8000);
-
-		dev_dbg(&rio->rio_dev->dev,
-			"read stats: result:%d this_read:%u partial:%u\n",
-			result, this_read, partial);
-
-		if (partial) {
-			count = this_read = partial;
-		} else if (result == -ETIMEDOUT || result == 15) {	/* FIXME: 15 ??? */
-			if (!maxretry--) {
-				mutex_unlock(&(rio->lock));
-				dev_err(&rio->rio_dev->dev,
-					"read_rio: maxretry timeout\n");
-				return -ETIME;
-			}
-			prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-			schedule_timeout(NAK_TIMEOUT);
-			finish_wait(&rio->wait_q, &wait);
-			continue;
-		} else if (result != -EREMOTEIO) {
-			mutex_unlock(&(rio->lock));
-			dev_err(&rio->rio_dev->dev,
-				"Read Whoops - result:%d partial:%u this_read:%u\n",
-				result, partial, this_read);
-			return -EIO;
-		} else {
-			mutex_unlock(&(rio->lock));
-			return (0);
-		}
-
-		if (this_read) {
-			if (copy_to_user(buffer, ibuf, this_read)) {
-				mutex_unlock(&(rio->lock));
-				return -EFAULT;
-			}
-			count -= this_read;
-			read_count += this_read;
-			buffer += this_read;
-		}
-	}
-	mutex_unlock(&(rio->lock));
-	return read_count;
-}
-
-static const struct file_operations usb_rio_fops = {
-	.owner =	THIS_MODULE,
-	.read =		read_rio,
-	.write =	write_rio,
-	.unlocked_ioctl = ioctl_rio,
-	.open =		open_rio,
-	.release =	close_rio,
-	.llseek =	noop_llseek,
-};
-
-static struct usb_class_driver usb_rio_class = {
-	.name =		"rio500%d",
-	.fops =		&usb_rio_fops,
-	.minor_base =	RIO_MINOR,
-};
-
-static int probe_rio(struct usb_interface *intf,
-		     const struct usb_device_id *id)
-{
-	struct usb_device *dev = interface_to_usbdev(intf);
-	struct rio_usb_data *rio = &rio_instance;
-	int retval = 0;
-
-	mutex_lock(&rio500_mutex);
-	if (rio->present) {
-		dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum);
-		retval = -EBUSY;
-		goto bail_out;
-	} else {
-		dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
-	}
-
-	retval = usb_register_dev(intf, &usb_rio_class);
-	if (retval) {
-		dev_err(&dev->dev,
-			"Not able to get a minor for this device.\n");
-		retval = -ENOMEM;
-		goto bail_out;
-	}
-
-	rio->rio_dev = dev;
-
-	if (!(rio->obuf = kmalloc(OBUF_SIZE, GFP_KERNEL))) {
-		dev_err(&dev->dev,
-			"probe_rio: Not enough memory for the output buffer\n");
-		usb_deregister_dev(intf, &usb_rio_class);
-		retval = -ENOMEM;
-		goto bail_out;
-	}
-	dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf);
-
-	if (!(rio->ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL))) {
-		dev_err(&dev->dev,
-			"probe_rio: Not enough memory for the input buffer\n");
-		usb_deregister_dev(intf, &usb_rio_class);
-		kfree(rio->obuf);
-		retval = -ENOMEM;
-		goto bail_out;
-	}
-	dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
-
-	mutex_init(&(rio->lock));
-
-	usb_set_intfdata (intf, rio);
-	rio->present = 1;
-bail_out:
-	mutex_unlock(&rio500_mutex);
-
-	return retval;
-}
-
-static void disconnect_rio(struct usb_interface *intf)
-{
-	struct rio_usb_data *rio = usb_get_intfdata (intf);
-
-	usb_set_intfdata (intf, NULL);
-	mutex_lock(&rio500_mutex);
-	if (rio) {
-		usb_deregister_dev(intf, &usb_rio_class);
-
-		mutex_lock(&(rio->lock));
-		if (rio->isopen) {
-			rio->isopen = 0;
-			/* better let it finish - the release will do whats needed */
-			rio->rio_dev = NULL;
-			mutex_unlock(&(rio->lock));
-			mutex_unlock(&rio500_mutex);
-			return;
-		}
-		kfree(rio->ibuf);
-		kfree(rio->obuf);
-
-		dev_info(&intf->dev, "USB Rio disconnected.\n");
-
-		rio->present = 0;
-		mutex_unlock(&(rio->lock));
-	}
-	mutex_unlock(&rio500_mutex);
-}
-
-static const struct usb_device_id rio_table[] = {
-	{ USB_DEVICE(0x0841, 1) }, 		/* Rio 500 */
-	{ }					/* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, rio_table);
-
-static struct usb_driver rio_driver = {
-	.name =		"rio500",
-	.probe =	probe_rio,
-	.disconnect =	disconnect_rio,
-	.id_table =	rio_table,
-};
-
-module_usb_driver(rio_driver);
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
deleted file mode 100644
index 6db7a58..0000000
--- a/drivers/usb/misc/rio500_usb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*  ----------------------------------------------------------------------
-    Copyright (C) 2000  Cesar Miquel  (miquel@df.uba.ar)
-    ---------------------------------------------------------------------- */
-
-#define RIO_SEND_COMMAND			0x1
-#define RIO_RECV_COMMAND			0x2
-
-#define RIO_DIR_OUT               	        0x0
-#define RIO_DIR_IN				0x1
-
-struct RioCommand {
-	short length;
-	int request;
-	int requesttype;
-	int value;
-	int index;
-	void __user *buffer;
-	int timeout;
-};
diff --git a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
index 39745c1..3d75aa1 100644
--- a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
+++ b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
@@ -1142,8 +1142,8 @@ static int __maybe_unused redriver_i2c_suspend(struct device *dev)
 			__func__);
 
 	/* Disable redriver chip when USB cable disconnected */
-	if ((!redriver->vbus_active)
-			&& (!redriver->host_active))
+	if (!redriver->vbus_active && !redriver->host_active &&
+	    redriver->op_mode != OP_MODE_DP)
 		ssusb_redriver_gen_dev_set(redriver, false);
 
 	flush_workqueue(redriver->redriver_wq);
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 9ba4a4e..aa982d3 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 
@@ -57,6 +58,8 @@ struct usb_lcd {
 							   using up all RAM */
 	struct usb_anchor	submitted;		/* URBs to wait for
 							   before suspend */
+	struct rw_semaphore	io_rwsem;
+	unsigned long		disconnected:1;
 };
 #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
 
@@ -142,6 +145,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
 
 	dev = file->private_data;
 
+	down_read(&dev->io_rwsem);
+
+	if (dev->disconnected) {
+		retval = -ENODEV;
+		goto out_up_io;
+	}
+
 	/* do a blocking bulk read to get data from the device */
 	retval = usb_bulk_msg(dev->udev,
 			      usb_rcvbulkpipe(dev->udev,
@@ -158,6 +168,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
 			retval = bytes_read;
 	}
 
+out_up_io:
+	up_read(&dev->io_rwsem);
+
 	return retval;
 }
 
@@ -237,11 +250,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
 	if (r < 0)
 		return -EINTR;
 
+	down_read(&dev->io_rwsem);
+
+	if (dev->disconnected) {
+		retval = -ENODEV;
+		goto err_up_io;
+	}
+
 	/* create a urb, and a buffer for it, and copy the data to the urb */
 	urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!urb) {
 		retval = -ENOMEM;
-		goto err_no_buf;
+		goto err_up_io;
 	}
 
 	buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
@@ -278,6 +298,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
 	   the USB core will eventually free it entirely */
 	usb_free_urb(urb);
 
+	up_read(&dev->io_rwsem);
 exit:
 	return count;
 error_unanchor:
@@ -285,7 +306,8 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
 error:
 	usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
 	usb_free_urb(urb);
-err_no_buf:
+err_up_io:
+	up_read(&dev->io_rwsem);
 	up(&dev->limit_sem);
 	return retval;
 }
@@ -325,6 +347,7 @@ static int lcd_probe(struct usb_interface *interface,
 
 	kref_init(&dev->kref);
 	sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+	init_rwsem(&dev->io_rwsem);
 	init_usb_anchor(&dev->submitted);
 
 	dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -422,6 +445,12 @@ static void lcd_disconnect(struct usb_interface *interface)
 	/* give back our minor */
 	usb_deregister_dev(interface, &lcd_class);
 
+	down_write(&dev->io_rwsem);
+	dev->disconnected = 1;
+	up_write(&dev->io_rwsem);
+
+	usb_kill_anchored_urbs(&dev->submitted);
+
 	/* decrement our usage count */
 	kref_put(&dev->kref, lcd_delete);
 
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6715a12..be0505b 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -60,6 +60,7 @@ struct usb_yurex {
 
 	struct kref		kref;
 	struct mutex		io_mutex;
+	unsigned long		disconnected:1;
 	struct fasync_struct	*async_queue;
 	wait_queue_head_t	waitq;
 
@@ -107,6 +108,7 @@ static void yurex_delete(struct kref *kref)
 				dev->int_buffer, dev->urb->transfer_dma);
 		usb_free_urb(dev->urb);
 	}
+	usb_put_intf(dev->interface);
 	usb_put_dev(dev->udev);
 	kfree(dev);
 }
@@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb)
 	switch (status) {
 	case 0: /*success*/
 		break;
+	/* The device is terminated or messed up, give up */
 	case -EOVERFLOW:
 		dev_err(&dev->interface->dev,
 			"%s - overflow with length %d, actual length is %d\n",
@@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb)
 	case -ENOENT:
 	case -ESHUTDOWN:
 	case -EILSEQ:
-		/* The device is terminated, clean up */
+	case -EPROTO:
+	case -ETIME:
 		return;
 	default:
 		dev_err(&dev->interface->dev,
 			"%s - unknown status received: %d\n", __func__, status);
-		goto exit;
+		return;
 	}
 
 	/* handle received message */
@@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb)
 		break;
 	}
 
-exit:
 	retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
 	if (retval) {
 		dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
@@ -204,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
 	init_waitqueue_head(&dev->waitq);
 
 	dev->udev = usb_get_dev(interface_to_usbdev(interface));
-	dev->interface = interface;
+	dev->interface = usb_get_intf(interface);
 
 	/* set up the endpoint information */
 	iface_desc = interface->cur_altsetting;
@@ -315,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface)
 
 	/* prevent more I/O from starting */
 	usb_poison_urb(dev->urb);
+	usb_poison_urb(dev->cntl_urb);
 	mutex_lock(&dev->io_mutex);
-	dev->interface = NULL;
+	dev->disconnected = 1;
 	mutex_unlock(&dev->io_mutex);
 
 	/* wakeup waiters */
@@ -404,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 	dev = file->private_data;
 
 	mutex_lock(&dev->io_mutex);
-	if (!dev->interface) {		/* already disconnected */
+	if (dev->disconnected) {		/* already disconnected */
 		mutex_unlock(&dev->io_mutex);
 		return -ENODEV;
 	}
@@ -439,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
 		goto error;
 
 	mutex_lock(&dev->io_mutex);
-	if (!dev->interface) {		/* already disconnected */
+	if (dev->disconnected) {		/* already disconnected */
 		mutex_unlock(&dev->io_mutex);
 		retval = -ENODEV;
 		goto error;
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index dfd4c0b..df24403 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1649,8 +1649,11 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
 	u32 vdm_hdr;
 	int ret;
 
-	if (!pd->vdm_tx)
+	mutex_lock(&pd->svid_handler_lock);
+	if (!pd->vdm_tx) {
+		mutex_unlock(&pd->svid_handler_lock);
 		return;
+	}
 
 	/* only send one VDM at a time */
 	vdm_hdr = pd->vdm_tx->data[0];
@@ -1664,6 +1667,7 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
 		pd->current_pr == PR_SRC && !in_src_ams(pd)) {
 		/* Set SinkTxNG and reschedule sm_work to send again */
 		start_src_ams(pd, true);
+		mutex_unlock(&pd->svid_handler_lock);
 		return;
 	}
 
@@ -1673,6 +1677,7 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
 		usbpd_err(&pd->dev, "Error (%d) sending VDM command %d\n",
 				ret, SVDM_HDR_CMD(pd->vdm_tx->data[0]));
 
+		mutex_unlock(&pd->svid_handler_lock);
 		/* retry when hitting PE_SRC/SNK_Ready again */
 		if (ret != -EBUSY && sop_type == SOP_MSG)
 			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
@@ -1705,6 +1710,7 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
 	}
 
 	pd->vdm_tx = NULL;
+	mutex_unlock(&pd->svid_handler_lock);
 }
 
 static void reset_vdm_state(struct usbpd *pd)
@@ -1721,7 +1727,6 @@ static void reset_vdm_state(struct usbpd *pd)
 		}
 	}
 
-	mutex_unlock(&pd->svid_handler_lock);
 	pd->vdm_state = VDM_NONE;
 	kfree(pd->vdm_tx_retry);
 	pd->vdm_tx_retry = NULL;
@@ -1732,6 +1737,7 @@ static void reset_vdm_state(struct usbpd *pd)
 	pd->vdm_tx = NULL;
 	pd->ss_lane_svid = 0x0;
 	pd->vdm_in_suspend = false;
+	mutex_unlock(&pd->svid_handler_lock);
 }
 
 static void handle_get_src_cap_extended(struct usbpd *pd)
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index c926494..4618752 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -16,7 +16,6 @@
 #include <linux/clk.h>
 #include <linux/extcon.h>
 #include <linux/reset.h>
-#include <linux/hrtimer.h>
 
 enum core_ldo_levels {
 	CORE_LEVEL_NONE = 0,
@@ -73,9 +72,6 @@ enum core_ldo_levels {
 #define DP_MODE			BIT(1) /* enables DP mode */
 #define USB3_DP_COMBO_MODE	(USB3_MODE | DP_MODE) /*enables combo mode */
 
-/* USB3 Gen2 link training indicator */
-#define RX_EQUALIZATION_IN_PROGRESS	BIT(3)
-
 enum qmp_phy_rev_reg {
 	USB3_PHY_PCS_STATUS,
 	USB3_PHY_AUTONOMOUS_MODE_CTRL,
@@ -143,7 +139,6 @@ struct msm_ssphy_qmp {
 	int			reg_offset_cnt;
 	u32			*qmp_phy_init_seq;
 	int			init_seq_len;
-	struct hrtimer		timer;
 };
 
 static const struct of_device_id msm_usb_id_table[] = {
@@ -695,7 +690,6 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
 		/* Make sure above write completed with PHY */
 		wmb();
 
-		hrtimer_cancel(&phy->timer);
 		msm_ssphy_qmp_enable_clks(phy, false);
 		phy->in_suspend = true;
 		msm_ssphy_power_enable(phy, 0);
@@ -720,74 +714,6 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
 	return 0;
 }
 
-static enum hrtimer_restart timer_fn(struct hrtimer *timer)
-{
-	struct msm_ssphy_qmp *phy =
-		container_of(timer, struct msm_ssphy_qmp, timer);
-	u8 status2, status2_1, sw1, mx1, sw2, mx2;
-	int timeout = 15000;
-
-	status2_1 = sw1 = sw2 = mx1 = mx2 = 0;
-
-	status2 = readl_relaxed(phy->base +
-			phy->phy_reg[USB3_DP_PCS_PCS_STATUS2]);
-	if (status2 & RX_EQUALIZATION_IN_PROGRESS) {
-		while (timeout > 0) {
-			status2_1 = readl_relaxed(phy->base +
-					phy->phy_reg[USB3_DP_PCS_PCS_STATUS2]);
-			if (status2_1 & RX_EQUALIZATION_IN_PROGRESS) {
-				timeout -= 500;
-				udelay(500);
-				continue;
-			}
-
-			writel_relaxed(0x08, phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]);
-			writel_relaxed(0x08, phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]);
-			sw1 = readl_relaxed(phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]);
-			mx1 = readl_relaxed(phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]);
-			udelay(1);
-			writel_relaxed(0x0, phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]);
-			writel_relaxed(0x0, phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]);
-			sw2 = readl_relaxed(phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]);
-			mx2 = readl_relaxed(phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]);
-
-			break;
-		}
-	}
-
-	dev_dbg(phy->phy.dev,
-		"st=%x st2=%x sw1=%x sw2=%x mx1=%x mx2=%x timeout=%d\n",
-		status2, status2_1, sw1, sw2, mx1, mx2, timeout);
-
-	hrtimer_forward_now(timer, ms_to_ktime(1));
-
-	return HRTIMER_RESTART;
-}
-
-static int msm_ssphy_qmp_link_training(struct usb_phy *uphy, bool start)
-{
-	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
-					phy);
-
-	if (start) {
-		hrtimer_start(&phy->timer, 0, HRTIMER_MODE_REL);
-		dev_dbg(uphy->dev, "link training start\n");
-	} else {
-		hrtimer_cancel(&phy->timer);
-		dev_dbg(uphy->dev, "link training stop\n");
-	}
-
-	return 0;
-}
-
 static int msm_ssphy_qmp_notify_connect(struct usb_phy *uphy,
 				       enum usb_device_speed speed)
 {
@@ -810,7 +736,6 @@ static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy,
 		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
 	readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
 
-	hrtimer_cancel(&phy->timer);
 	dev_dbg(uphy->dev, "QMP phy disconnect notification\n");
 	dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected);
 	phy->cable_connected = false;
@@ -1181,19 +1106,12 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
 	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
 		phy->phy.flags |= PHY_VBUS_VALID_OVERRIDE;
 
-	hrtimer_init(&phy->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	phy->timer.function = timer_fn;
-
 	phy->phy.dev			= dev;
 	phy->phy.init			= msm_ssphy_qmp_init;
 	phy->phy.set_suspend		= msm_ssphy_qmp_set_suspend;
 	phy->phy.notify_connect		= msm_ssphy_qmp_notify_connect;
 	phy->phy.notify_disconnect	= msm_ssphy_qmp_notify_disconnect;
 
-	if (of_property_read_bool(dev->of_node, "qcom,link-training-reset"))
-		phy->phy.link_training	= msm_ssphy_qmp_link_training;
-
-
 	if (phy->phy.type == USB_PHY_TYPE_USB3_AND_DP)
 		phy->phy.reset		= msm_ssphy_qmp_dp_combo_reset;
 	else
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 6137f79..c47b721 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -207,6 +207,7 @@ struct usbhs_priv;
 /* DCPCTR */
 #define BSTS		(1 << 15)	/* Buffer Status */
 #define SUREQ		(1 << 14)	/* Sending SETUP Token */
+#define INBUFM		(1 << 14)	/* (PIPEnCTR) Transfer Buffer Monitor */
 #define CSSTS		(1 << 12)	/* CSSTS Status */
 #define	ACLRM		(1 << 9)	/* Buffer Auto-Clear Mode */
 #define SQCLR		(1 << 8)	/* Toggle Bit Clear */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 6036cba..aeb53ec 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -89,7 +89,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
 	list_del_init(&pkt->node);
 }
 
-static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
 {
 	return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
 }
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 88d1816..c3d3cc3 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
 		    void *buf, int len, int zero, int sequence);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
 void usbhs_pkt_start(struct usbhs_pipe *pipe);
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe);
 
 #endif /* RENESAS_USB_FIFO_H */
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 59cac40..7feac41 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -721,8 +721,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
 	unsigned long flags;
-
-	usbhsg_pipe_disable(uep);
+	int ret = 0;
 
 	dev_dbg(dev, "set halt %d (pipe %d)\n",
 		halt, usbhs_pipe_number(pipe));
@@ -730,6 +729,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
 	/********************  spin lock ********************/
 	usbhs_lock(priv, flags);
 
+	/*
+	 * According to usb_ep_set_halt()'s description, this function should
+	 * return -EAGAIN if the IN endpoint has any queue or data. Note
+	 * that the usbhs_pipe_is_dir_in() returns false if the pipe is an
+	 * that usbhs_pipe_is_dir_in() returns false if the pipe is an
+	 */
+	if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
+	    usbhs_pipe_contains_transmittable_data(pipe))) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	if (halt)
 		usbhs_pipe_stall(pipe);
 	else
@@ -740,10 +751,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
 	else
 		usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
 
+out:
 	usbhs_unlock(priv, flags);
 	/********************  spin unlock ******************/
 
-	return 0;
+	return ret;
 }
 
 static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index c4922b9..9e5afdd 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
 	return -EBUSY;
 }
 
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
+{
+	u16 val;
+
+	/* Not supported for DCP pipes */
+	if (usbhs_pipe_is_dcp(pipe))
+		return false;
+
+	val = usbhsp_pipectrl_get(pipe);
+	if (val & INBUFM)
+		return true;
+
+	return false;
+}
+
 /*
  *		PID ctrl
  */
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 3080423..3b13052 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe);
 void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
 				       int needs_bfre, int bfre_enable);
 int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe);
 void usbhs_pipe_enable(struct usbhs_pipe *pipe);
 void usbhs_pipe_disable(struct usbhs_pipe *pipe);
 void usbhs_pipe_stall(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e18735e..f06706e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1020,6 +1020,9 @@ static const struct usb_device_id id_table_combined[] = {
 	/* EZPrototypes devices */
 	{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
 	{ USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+	/* Sienna devices */
+	{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+	{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
 	{ }					/* Terminating entry */
 };
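
New serial adapters such as the Sienna and Echelon devices are supported purely by extending the driver's ID table. The same pattern in self-contained form for a hypothetical driver (the IDs below are placeholders, not real hardware):

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VID     0x1234          /* hypothetical vendor ID */
#define EXAMPLE_PID     0xabcd          /* hypothetical product ID */

static const struct usb_device_id example_id_table[] = {
        { USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID) },
        { }                             /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);     /* enables module autoloading */

The empty entry terminates the table, so new IDs always go before it.
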
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f12d806..22d6621 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -39,6 +39,9 @@
 
 #define FTDI_LUMEL_PD12_PID	0x6002
 
+/* Sienna Serial Interface by Secyourit GmbH */
+#define FTDI_SIENNA_PID		0x8348
+
 /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
 #define CYBER_CORTEX_AV_PID	0x8698
 
@@ -689,6 +692,12 @@
 #define BANDB_ZZ_PROG1_USB_PID	0xBA02
 
 /*
+ * Echelon USB Serial Interface
+ */
+#define ECHELON_VID		0x0920
+#define ECHELON_U20_PID		0x7500
+
+/*
  * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
  */
 #define INTREPID_VID		0x093C
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index d34779f..e66a59e 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
 
 	ep_desc = find_ep(serial, endpoint);
 	if (!ep_desc) {
-		/* leak the urb, something's wrong and the callers don't care */
-		return urb;
+		usb_free_urb(urb);
+		return NULL;
 	}
 	if (usb_endpoint_xfer_int(ep_desc)) {
 		ep_type_name = "INT";
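
The keyspan fix follows the usual allocate/validate/release shape for error paths: once usb_alloc_urb() has succeeded, every early exit must free the urb rather than hand it back unconfigured. A reduced sketch; the endpoint_ok parameter stands in for the find_ep() lookup:

static struct urb *setup_urb_sketch(bool endpoint_ok)
{
        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

        if (!urb)
                return NULL;

        if (!endpoint_ok) {
                usb_free_urb(urb);      /* release instead of leaking */
                return NULL;
        }

        return urb;                     /* caller owns the urb from here on */
}
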
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 56f572c..3cc659a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_PH8_AUDIO		0x0083
 #define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
 #define CINTERION_PRODUCT_AHXX_AUDIO		0x0085
+#define CINTERION_PRODUCT_CLS8			0x00b0
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID			0x0b3c
@@ -1154,6 +1155,14 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
 	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff),	/* Telit FN980 (rmnet) */
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff),	/* Telit FN980 (MBIM) */
+	  .driver_info = NCTRL(0) | RSVD(1) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff),	/* Telit FN980 (RNDIS) */
+	  .driver_info = NCTRL(2) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff),	/* Telit FN980 (ECM) */
+	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1847,6 +1856,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+	  .driver_info = RSVD(0) | RSVD(4) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
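
In option.c the .driver_info bits tell the driver how to treat individual interfaces: NCTRL(n) marks interface n as not supporting modem-control requests, and RSVD(n) keeps the driver off an interface that belongs to another function (for example a network driver). A hedged example of one more table entry; the IDs and flag choices are placeholders, not a real modem:

        { USB_DEVICE_INTERFACE_CLASS(0x1234, 0x5678, 0xff),    /* hypothetical modem */
          .driver_info = NCTRL(0) | RSVD(1) },  /* iface 0: no modem ctrl, iface 1: skip */

Such an entry would be slotted into option_ids[] ahead of the terminating { }.
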
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e3c5832..c9201e0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
 	struct ti_port *tport;
 	int port_number;
 	int status;
-	int do_unlock;
 	unsigned long flags;
 
 	tdev = usb_get_serial_data(port->serial);
@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
 			"%s - cannot send close port command, %d\n"
 							, __func__, status);
 
-	/* if mutex_lock is interrupted, continue anyway */
-	do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
+	mutex_lock(&tdev->td_open_close_lock);
 	--tport->tp_tdev->td_open_port_count;
-	if (tport->tp_tdev->td_open_port_count <= 0) {
+	if (tport->tp_tdev->td_open_port_count == 0) {
 		/* last port is closed, shut down interrupt urb */
 		usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
-		tport->tp_tdev->td_open_port_count = 0;
 	}
-	if (do_unlock)
-		mutex_unlock(&tdev->td_open_close_lock);
+	mutex_unlock(&tdev->td_open_close_lock);
 }
 
 
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index f7aaa7f..4341537 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -311,10 +311,7 @@ static void serial_cleanup(struct tty_struct *tty)
 	serial = port->serial;
 	owner = serial->type->driver.owner;
 
-	mutex_lock(&serial->disc_mutex);
-	if (!serial->disconnected)
-		usb_autopm_put_interface(serial->interface);
-	mutex_unlock(&serial->disc_mutex);
+	usb_autopm_put_interface(serial->interface);
 
 	usb_serial_put(serial);
 	module_put(owner);
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index f101347..e0cf11f 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -59,6 +59,7 @@ struct usb_skel {
 	spinlock_t		err_lock;		/* lock for errors */
 	struct kref		kref;
 	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
+	unsigned long		disconnected:1;
 	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
 };
 #define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -71,6 +72,7 @@ static void skel_delete(struct kref *kref)
 	struct usb_skel *dev = to_skel_dev(kref);
 
 	usb_free_urb(dev->bulk_in_urb);
+	usb_put_intf(dev->interface);
 	usb_put_dev(dev->udev);
 	kfree(dev->bulk_in_buffer);
 	kfree(dev);
@@ -122,10 +124,7 @@ static int skel_release(struct inode *inode, struct file *file)
 		return -ENODEV;
 
 	/* allow the device to be autosuspended */
-	mutex_lock(&dev->io_mutex);
-	if (dev->interface)
-		usb_autopm_put_interface(dev->interface);
-	mutex_unlock(&dev->io_mutex);
+	usb_autopm_put_interface(dev->interface);
 
 	/* decrement the count on our device */
 	kref_put(&dev->kref, skel_delete);
@@ -238,7 +237,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
 	if (rv < 0)
 		return rv;
 
-	if (!dev->interface) {		/* disconnect() was called */
+	if (dev->disconnected) {		/* disconnect() was called */
 		rv = -ENODEV;
 		goto exit;
 	}
@@ -420,7 +419,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
 
 	/* this lock makes sure we don't submit URBs to gone devices */
 	mutex_lock(&dev->io_mutex);
-	if (!dev->interface) {		/* disconnect() was called */
+	if (dev->disconnected) {		/* disconnect() was called */
 		mutex_unlock(&dev->io_mutex);
 		retval = -ENODEV;
 		goto error;
@@ -505,7 +504,7 @@ static int skel_probe(struct usb_interface *interface,
 	init_waitqueue_head(&dev->bulk_in_wait);
 
 	dev->udev = usb_get_dev(interface_to_usbdev(interface));
-	dev->interface = interface;
+	dev->interface = usb_get_intf(interface);
 
 	/* set up the endpoint information */
 	/* use only the first bulk-in and bulk-out endpoints */
@@ -571,7 +570,7 @@ static void skel_disconnect(struct usb_interface *interface)
 
 	/* prevent more I/O from starting */
 	mutex_lock(&dev->io_mutex);
-	dev->interface = NULL;
+	dev->disconnected = 1;
 	mutex_unlock(&dev->io_mutex);
 
 	usb_kill_anchored_urbs(&dev->submitted);
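
Taken together, the usb-skeleton changes pin the interface for as long as the device structure lives and track disconnection with a flag instead of clearing the pointer, so late callers such as release() can still pass dev->interface to the autopm helpers. A condensed sketch of that lifetime pattern; struct my_dev is a hypothetical, trimmed-down usb_skel:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct my_dev {
        struct usb_device       *udev;
        struct usb_interface    *intf;
        struct mutex            io_mutex;
        unsigned long           disconnected:1;
        struct kref             kref;
};

/* probe: dev->udev = usb_get_dev(...); dev->intf = usb_get_intf(interface); */

static void my_delete(struct kref *kref)
{
        struct my_dev *dev = container_of(kref, struct my_dev, kref);

        usb_put_intf(dev->intf);        /* pairs with usb_get_intf() in probe */
        usb_put_dev(dev->udev);
        kfree(dev);
}

static void my_disconnect(struct usb_interface *interface)
{
        struct my_dev *dev = usb_get_intfdata(interface);

        mutex_lock(&dev->io_mutex);
        dev->disconnected = 1;          /* I/O paths test this flag instead of
                                         * checking whether ->intf is NULL */
        mutex_unlock(&dev->io_mutex);

        kref_put(&dev->kref, my_delete);
}
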
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 1abe4d02..ffde179 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -38,6 +38,7 @@ static const struct aspeed_wdt_config ast2500_config = {
 static const struct of_device_id aspeed_wdt_of_table[] = {
 	{ .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
 	{ .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
+	{ .compatible = "aspeed,ast2600-wdt", .data = &ast2500_config },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
@@ -264,7 +265,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
 		set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
 	}
 
-	if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) {
+	if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
+		(of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
 		u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
 
 		reg &= config->ext_pulse_width_mask;
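
The new ast2600 entry reuses the ast2500 configuration, which is the usual way to support a register-compatible successor: point the new compatible string's .data at the existing config. Reduced to the pattern itself, with hypothetical names:

struct wdt_config {
        u32 ext_pulse_width_mask;       /* hypothetical, mirrors the driver's config */
};

static const struct wdt_config cfg_a = { .ext_pulse_width_mask = 0xff };

static const struct of_device_id wdt_of_table[] = {
        { .compatible = "vendor,soc-a-wdt", .data = &cfg_a },
        { .compatible = "vendor,soc-b-wdt", .data = &cfg_a },  /* same IP block */
        { },
};
MODULE_DEVICE_TABLE(of, wdt_of_table);

At probe time the matched .data can be retrieved with of_device_get_match_data(), or individual features can be gated with of_device_is_compatible() as the hunk above does for the reset-width register.
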
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 7e7bdcb..9f3123b 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -55,7 +55,7 @@
 
 #define IMX2_WDT_WMCR		0x08		/* Misc Register */
 
-#define IMX2_WDT_MAX_TIME	128
+#define IMX2_WDT_MAX_TIME	128U
 #define IMX2_WDT_DEFAULT_TIME	60		/* in seconds */
 
 #define WDOG_SEC_TO_COUNT(s)	((s * 2 - 1) << 8)
@@ -180,7 +180,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
 {
 	unsigned int actual;
 
-	actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
+	actual = min(new_timeout, IMX2_WDT_MAX_TIME);
 	__imx2_wdt_set_timeout(wdog, actual);
 	wdog->timeout = new_timeout;
 	return 0;
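
The U suffix is not cosmetic: the kernel's min() type-checks its operands, and new_timeout is an unsigned int, so comparing it against a plain signed 128 would trigger the distinct-types warning. The change also appears to repair the clamp itself, since the old bound mixed a seconds value with one derived from max_hw_heartbeat_ms. Roughly:

        unsigned int new_timeout = 200;

        /* min(new_timeout, 128)   - warns: unsigned int vs int            */
        /* min(new_timeout, 128U)  - fine:  both operands are unsigned int */
        unsigned int actual = min(new_timeout, 128U);   /* clamps to 128 seconds */
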
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 7494dbe..db58aaa 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -29,6 +29,8 @@
 #include "../pci/pci.h"
 #ifdef CONFIG_PCI_MMCONFIG
 #include <asm/pci_x86.h>
+
+static int xen_mcfg_late(void);
 #endif
 
 static bool __read_mostly pci_seg_supported = true;
@@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev)
 #ifdef CONFIG_PCI_IOV
 	struct pci_dev *physfn = pci_dev->physfn;
 #endif
-
+#ifdef CONFIG_PCI_MMCONFIG
+	static bool pci_mcfg_reserved = false;
+	/*
+	 * Reserve MCFG areas in Xen on the first invocation, since this may
+	 * be called from inside acpi_init immediately after the MCFG table
+	 * has finally been parsed.
+	 */
+	if (!pci_mcfg_reserved) {
+		xen_mcfg_late();
+		pci_mcfg_reserved = true;
+	}
+#endif
 	if (pci_seg_supported) {
 		struct {
 			struct physdev_pci_device_add add;
@@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void)
 arch_initcall(register_xen_pci_notifier);
 
 #ifdef CONFIG_PCI_MMCONFIG
-static int __init xen_mcfg_late(void)
+static int xen_mcfg_late(void)
 {
 	struct pci_mmcfg_region *cfg;
 	int rc;
@@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void)
 	}
 	return 0;
 }
-/*
- * Needs to be done after acpi_init which are subsys_initcall.
- */
-subsys_initcall_sync(xen_mcfg_late);
 #endif
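
Replacing the subsys_initcall with a call from the first xen_add_device() is a run-once-on-first-use pattern: a static flag remembers whether the deferred setup has already happened. In reduced form (do_late_setup() and register_backend() are hypothetical placeholders):

static int add_device_sketch(struct device *dev)
{
        static bool late_setup_done;

        if (!late_setup_done) {
                do_late_setup();        /* first caller performs the deferred init */
                late_setup_done = true;
        }

        return register_backend(dev);
}

As in the hunk above, the flag is not lock-protected, so the pattern assumes the callers are already serialized; otherwise a mutex or an atomic flag would be needed.
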
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 39c6315..454c682 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,6 +55,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
+#include <linux/workqueue.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
@@ -116,6 +117,8 @@ struct xenbus_file_priv {
 	wait_queue_head_t read_waitq;
 
 	struct kref kref;
+
+	struct work_struct wq;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
 	mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
 {
 	struct xenbus_file_priv *u;
 	struct xenbus_transaction_holder *trans, *tmp;
 	struct watch_adapter *watch, *tmp_watch;
 	struct read_buffer *rb, *tmp_rb;
 
-	u = container_of(kref, struct xenbus_file_priv, kref);
+	u = container_of(wq, struct xenbus_file_priv, wq);
 
 	/*
 	 * No need for locking here because there are no other users,
@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
 	kfree(u);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+	struct xenbus_file_priv *u;
+
+	/*
+	 * We might be called in xenbus_thread().
+	 * Use workqueue to avoid deadlock.
+	 */
+	u = container_of(kref, struct xenbus_file_priv, kref);
+	schedule_work(&u->wq);
+}
+
 static struct xenbus_transaction_holder *xenbus_get_transaction(
 	struct xenbus_file_priv *u, uint32_t tx_id)
 {
@@ -652,6 +667,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 	INIT_LIST_HEAD(&u->watches);
 	INIT_LIST_HEAD(&u->read_buffers);
 	init_waitqueue_head(&u->read_waitq);
+	INIT_WORK(&u->wq, xenbus_worker);
 
 	mutex_init(&u->reply_mutex);
 	mutex_init(&u->msgbuffer_mutex);
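
The xenbus change is the standard trick for a kref whose final put can land in an awkward context: the release callback only schedules work, and the real teardown runs later in process context. A self-contained sketch around a hypothetical struct foo:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
        struct kref             kref;
        struct work_struct      work;
};

static void foo_worker(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, work);

        /* heavy teardown runs here, in process context */
        kfree(f);
}

static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, kref);

        /* the last kref_put() may run somewhere freeing directly could
         * deadlock (here: xenbus_thread), so defer to the workqueue */
        schedule_work(&f->work);
}

/* at allocation: kref_init(&f->kref); INIT_WORK(&f->work, foo_worker); */
/* to drop a reference:                kref_put(&f->kref, foo_release); */
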
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 05454a7..550d0b1 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -528,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
 	v9inode = V9FS_I(inode);
 	mutex_lock(&v9inode->v_mutex);
 	if (!v9inode->writeback_fid &&
+	    (vma->vm_flags & VM_SHARED) &&
 	    (vma->vm_flags & VM_WRITE)) {
 		/*
 		 * clone a fid and add it to writeback_fid
@@ -629,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
 			(vma->vm_end - vma->vm_start - 1),
 	};
 
+	if (!(vma->vm_flags & VM_SHARED))
+		return;
 
 	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e49e292..72c7456 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -10000,6 +10000,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 			btrfs_err(info,
 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
 				  cache->key.objectid);
+			btrfs_put_block_group(cache);
 			ret = -EINVAL;
 			goto error;
 		}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c841865..4870440d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2056,25 +2056,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_log_ctx ctx;
 	int ret = 0, err;
-	u64 len;
 
-	/*
-	 * If the inode needs a full sync, make sure we use a full range to
-	 * avoid log tree corruption, due to hole detection racing with ordered
-	 * extent completion for adjacent ranges, and assertion failures during
-	 * hole detection.
-	 */
-	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-		     &BTRFS_I(inode)->runtime_flags)) {
-		start = 0;
-		end = LLONG_MAX;
-	}
-
-	/*
-	 * The range length can be represented by u64, we have to do the typecasts
-	 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
-	 */
-	len = (u64)end - (u64)start + 1;
 	trace_btrfs_sync_file(file, datasync);
 
 	btrfs_init_log_ctx(&ctx, inode);
@@ -2101,6 +2083,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	atomic_inc(&root->log_batch);
 
 	/*
+	 * If the inode needs a full sync, make sure we use a full range to
+	 * avoid log tree corruption, due to hole detection racing with ordered
+	 * extent completion for adjacent ranges, and assertion failures during
+	 * hole detection. Do this while holding the inode lock, to avoid races
+	 * with other tasks.
+	 */
+	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+		     &BTRFS_I(inode)->runtime_flags)) {
+		start = 0;
+		end = LLONG_MAX;
+	}
+
+	/*
 	 * Before we acquired the inode's lock, someone may have dirtied more
 	 * pages in the target range. We need to make sure that writeback for
 	 * any such pages does not start while we are logging the inode, because
@@ -2127,8 +2122,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	/*
 	 * We have to do this here to avoid the priority inversion of waiting on
 	 * IO of a lower priority task while holding a transaction open.
+	 *
+	 * Also, since the range length can be represented by a u64, we have
+	 * to do the typecasts to avoid signed overflow if it's [0, LLONG_MAX].
 	 */
-	ret = btrfs_wait_ordered_range(inode, start, len);
+	ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
 	if (ret) {
 		up_write(&BTRFS_I(inode)->dio_sem);
 		inode_unlock(inode);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index e5b9e59..cd2a586 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -511,7 +511,7 @@ static int process_leaf(struct btrfs_root *root,
 	struct btrfs_extent_data_ref *dref;
 	struct btrfs_shared_data_ref *sref;
 	u32 count;
-	int i = 0, tree_block_level = 0, ret;
+	int i = 0, tree_block_level = 0, ret = 0;
 	struct btrfs_key key;
 	int nritems = btrfs_header_nritems(leaf);
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 5d57ed6..bccd9de 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3187,6 +3187,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
 			if (!page) {
 				btrfs_delalloc_release_metadata(BTRFS_I(inode),
 							PAGE_SIZE, true);
+				btrfs_delalloc_release_extents(BTRFS_I(inode),
+							PAGE_SIZE, true);
 				ret = -ENOMEM;
 				goto out;
 			}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e561eb4..4d4f57f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2860,7 +2860,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
  * in the tree of log roots
  */
 static int update_log_root(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *log)
+			   struct btrfs_root *log,
+			   struct btrfs_root_item *root_item)
 {
 	struct btrfs_fs_info *fs_info = log->fs_info;
 	int ret;
@@ -2868,10 +2869,10 @@ static int update_log_root(struct btrfs_trans_handle *trans,
 	if (log->log_transid == 1) {
 		/* insert root item on the first sync */
 		ret = btrfs_insert_root(trans, fs_info->log_root_tree,
-				&log->root_key, &log->root_item);
+				&log->root_key, root_item);
 	} else {
 		ret = btrfs_update_root(trans, fs_info->log_root_tree,
-				&log->root_key, &log->root_item);
+				&log->root_key, root_item);
 	}
 	return ret;
 }
@@ -2969,6 +2970,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *log = root->log_root;
 	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
+	struct btrfs_root_item new_root_item;
 	int log_transid = 0;
 	struct btrfs_log_ctx root_log_ctx;
 	struct blk_plug plug;
@@ -3032,18 +3034,26 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
+	/*
+	 * We _must_ update under the root->log_mutex in order to make sure we
+	 * have a consistent view of the log root we are trying to commit at
+	 * this moment.
+	 *
+	 * We _must_ copy this into a local copy, because we are not holding the
+	 * log_root_tree->log_mutex yet.  This is important because when we
+	 * commit the log_root_tree we must have a consistent view of the
+	 * log_root_tree when we update the super block to point at the
+	 * log_root_tree bytenr.  If we update the log_root_tree here we'll race
+	 * with the commit and possibly point at the new block which we may not
+	 * have written out.
+	 */
 	btrfs_set_root_node(&log->root_item, log->node);
+	memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
 
 	root->log_transid++;
 	log->log_transid = root->log_transid;
 	root->log_start_pid = 0;
 	/*
-	 * Update or create log root item under the root's log_mutex to prevent
-	 * races with concurrent log syncs that can lead to failure to update
-	 * log root item because it was not created yet.
-	 */
-	ret = update_log_root(trans, log);
-	/*
 	 * IO has been started, blocks of the log tree have WRITTEN flag set
 	 * in their headers. new modifications of the log will be written to
 	 * new positions. so it's safe to allow log writers to go in.
@@ -3063,6 +3073,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	mutex_unlock(&log_root_tree->log_mutex);
 
 	mutex_lock(&log_root_tree->log_mutex);
+
+	/*
+	 * Now we are safe to update the log_root_tree because we're under the
+	 * log_mutex, and we're a current writer so we're holding the commit
+	 * open until we drop the log_mutex.
+	 */
+	ret = update_log_root(trans, log, &new_root_item);
+
 	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
 		/* atomic_dec_and_test implies a barrier */
 		cond_wake_up_nomb(&log_root_tree->log_writer_wait);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index c068452..8196c21 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -807,7 +807,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 
 	/* update inode */
 	inode->i_rdev = le32_to_cpu(info->rdev);
-	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+	/* directories have fl_stripe_unit set to zero */
+	if (le32_to_cpu(info->layout.fl_stripe_unit))
+		inode->i_blkbits =
+			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+	else
+		inode->i_blkbits = CEPH_BLOCK_SHIFT;
 
 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
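
The zero check matters because fls() returns the position of the most significant set bit counting from 1, and fls(0) is 0; a directory's fl_stripe_unit of zero therefore turned the old expression into i_blkbits = -1. For illustration:

        int shift_64k  = fls(0x10000) - 1;      /* 16: a 64 KiB stripe unit */
        int shift_zero = fls(0) - 1;            /* -1: why the fallback is needed */
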
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index bfcf11c..09db6d0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3640,7 +3640,9 @@ static void delayed_work(struct work_struct *work)
 				pr_info("mds%d hung\n", s->s_mds);
 			}
 		}
-		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
+		if (s->s_state == CEPH_MDS_SESSION_NEW ||
+		    s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+		    s->s_state == CEPH_MDS_SESSION_REJECTED) {
 			/* this mds is failed or recovering, just wait */
 			ceph_put_mds_session(s);
 			continue;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 907e85d..2fb6fa5 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -840,10 +840,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 static int
 cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 {
+	struct inode *inode;
+
 	if (flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	if (d_really_is_positive(direntry)) {
+		inode = d_inode(direntry);
+		if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+			CIFS_I(inode)->time = 0; /* force reval */
+
 		if (cifs_revalidate_dentry(direntry))
 			return 0;
 		else {
@@ -854,7 +860,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 			 * attributes will have been updated by
 			 * cifs_revalidate_dentry().
 			 */
-			if (IS_AUTOMOUNT(d_inode(direntry)) &&
+			if (IS_AUTOMOUNT(inode) &&
 			   !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
 				spin_lock(&direntry->d_lock);
 				direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8703b5f..b4e33ef 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -252,6 +252,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
 					 xid, fid);
 
+	if (rc) {
+		server->ops->close(xid, tcon, fid);
+		if (rc == -ESTALE)
+			rc = -EOPENSTALE;
+	}
+
 out:
 	kfree(buf);
 	return rc;
@@ -397,10 +403,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
 	bool oplock_break_cancelled;
 
 	spin_lock(&tcon->open_file_lock);
-
+	spin_lock(&cifsi->open_file_lock);
 	spin_lock(&cifs_file->file_info_lock);
 	if (--cifs_file->count > 0) {
 		spin_unlock(&cifs_file->file_info_lock);
+		spin_unlock(&cifsi->open_file_lock);
 		spin_unlock(&tcon->open_file_lock);
 		return;
 	}
@@ -413,9 +420,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
 
 	/* remove it from the lists */
-	spin_lock(&cifsi->open_file_lock);
 	list_del(&cifs_file->flist);
-	spin_unlock(&cifsi->open_file_lock);
 	list_del(&cifs_file->tlist);
 
 	if (list_empty(&cifsi->openFileList)) {
@@ -431,6 +436,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
 		cifs_set_oplock_level(cifsi, 0);
 	}
 
+	spin_unlock(&cifsi->open_file_lock);
 	spin_unlock(&tcon->open_file_lock);
 
 	oplock_break_cancelled = wait_oplock_handler ?
@@ -1835,13 +1841,12 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 {
 	struct cifsFileInfo *open_file = NULL;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
 
-	spin_lock(&tcon->open_file_lock);
+	spin_lock(&cifs_inode->open_file_lock);
 	/* we could simply get the first_list_entry since write-only entries
 	   are always at the end of the list but since the first entry might
 	   have a close pending, we go through the whole list */
@@ -1853,7 +1858,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 				/* found a good file */
 				/* lock it so it will not be closed on us */
 				cifsFileInfo_get(open_file);
-				spin_unlock(&tcon->open_file_lock);
+				spin_unlock(&cifs_inode->open_file_lock);
 				return open_file;
 			} /* else might as well continue, and look for
 			     another, or simply have the caller reopen it
@@ -1861,7 +1866,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 		} else /* write only file */
 			break; /* write only files are last so must be done */
 	}
-	spin_unlock(&tcon->open_file_lock);
+	spin_unlock(&cifs_inode->open_file_lock);
 	return NULL;
 }
 
@@ -1870,7 +1875,6 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 {
 	struct cifsFileInfo *open_file, *inv_file = NULL;
 	struct cifs_sb_info *cifs_sb;
-	struct cifs_tcon *tcon;
 	bool any_available = false;
 	int rc;
 	unsigned int refind = 0;
@@ -1886,16 +1890,15 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 	}
 
 	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-	tcon = cifs_sb_master_tcon(cifs_sb);
 
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
 
-	spin_lock(&tcon->open_file_lock);
+	spin_lock(&cifs_inode->open_file_lock);
 refind_writable:
 	if (refind > MAX_REOPEN_ATT) {
-		spin_unlock(&tcon->open_file_lock);
+		spin_unlock(&cifs_inode->open_file_lock);
 		return NULL;
 	}
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1907,7 +1910,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 			if (!open_file->invalidHandle) {
 				/* found a good writable file */
 				cifsFileInfo_get(open_file);
-				spin_unlock(&tcon->open_file_lock);
+				spin_unlock(&cifs_inode->open_file_lock);
 				return open_file;
 			} else {
 				if (!inv_file)
@@ -1926,7 +1929,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 		cifsFileInfo_get(inv_file);
 	}
 
-	spin_unlock(&tcon->open_file_lock);
+	spin_unlock(&cifs_inode->open_file_lock);
 
 	if (inv_file) {
 		rc = cifs_reopen_file(inv_file, false);
@@ -1940,7 +1943,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 			cifsFileInfo_put(inv_file);
 			++refind;
 			inv_file = NULL;
-			spin_lock(&tcon->open_file_lock);
+			spin_lock(&cifs_inode->open_file_lock);
 			goto refind_writable;
 		}
 	}
@@ -4001,17 +4004,15 @@ static int cifs_readpage(struct file *file, struct page *page)
 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
 {
 	struct cifsFileInfo *open_file;
-	struct cifs_tcon *tcon =
-		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
 
-	spin_lock(&tcon->open_file_lock);
+	spin_lock(&cifs_inode->open_file_lock);
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-			spin_unlock(&tcon->open_file_lock);
+			spin_unlock(&cifs_inode->open_file_lock);
 			return 1;
 		}
 	}
-	spin_unlock(&tcon->open_file_lock);
+	spin_unlock(&cifs_inode->open_file_lock);
 	return 0;
 }
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 53f3d08..26154db 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -410,6 +410,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 		/* if uniqueid is different, return error */
 		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
 		    CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+			CIFS_I(*pinode)->time = 0; /* force reval */
 			rc = -ESTALE;
 			goto cgiiu_exit;
 		}
@@ -417,6 +418,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 		/* if filetype is different, return error */
 		if (unlikely(((*pinode)->i_mode & S_IFMT) !=
 		    (fattr.cf_mode & S_IFMT))) {
+			CIFS_I(*pinode)->time = 0; /* force reval */
 			rc = -ESTALE;
 			goto cgiiu_exit;
 		}
@@ -926,6 +928,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
 		/* if uniqueid is different, return error */
 		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
 		    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+			CIFS_I(*inode)->time = 0; /* force reval */
 			rc = -ESTALE;
 			goto cgii_exit;
 		}
@@ -933,6 +936,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
 		/* if filetype is different, return error */
 		if (unlikely(((*inode)->i_mode & S_IFMT) !=
 		    (fattr.cf_mode & S_IFMT))) {
+			CIFS_I(*inode)->time = 0; /* force reval */
 			rc = -ESTALE;
 			goto cgii_exit;
 		}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 47db8eb..c7f0c85 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -183,6 +183,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
 	/* we do not want to loop forever */
 	last_mid = cur_mid;
 	cur_mid++;
+	/* avoid 0xFFFF MID */
+	if (cur_mid == 0xffff)
+		cur_mid++;
 
 	/*
 	 * This nested loop looks more expensive than it is.
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 0e0a548..32dc7a5 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2478,11 +2478,11 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		}
 	}
 
-	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
+	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
 		f2fs_msg(sb, KERN_INFO,
 			"Magic Mismatch, valid(0x%x) - read(0x%x)",
 			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
-		return 1;
+		return -EINVAL;
 	}
 
 	/* Currently, support only 4KB page cache size */
@@ -2490,7 +2490,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid page_cache_size (%lu), supports only 4KB",
 			PAGE_SIZE);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* Currently, support only 4KB block size */
@@ -2499,7 +2499,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid blocksize (%u), supports only 4KB",
 			blocksize);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* check log blocks per segment */
@@ -2507,7 +2507,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid log blocks per segment (%u)",
 			le32_to_cpu(raw_super->log_blocks_per_seg));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2517,7 +2517,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 				F2FS_MIN_LOG_SECTOR_SIZE) {
 		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
 			le32_to_cpu(raw_super->log_sectorsize));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 	if (le32_to_cpu(raw_super->log_sectors_per_block) +
 		le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2526,7 +2526,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 			"Invalid log sectors per block(%u) log sectorsize(%u)",
 			le32_to_cpu(raw_super->log_sectors_per_block),
 			le32_to_cpu(raw_super->log_sectorsize));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2542,7 +2542,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid segment count (%u)",
 			segment_count);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if (total_sections > segment_count ||
@@ -2551,28 +2551,28 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid segment/section count (%u, %u x %u)",
 			segment_count, total_sections, segs_per_sec);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if ((segment_count / segs_per_sec) < total_sections) {
 		f2fs_msg(sb, KERN_INFO,
 			"Small segment_count (%u < %u * %u)",
 			segment_count, segs_per_sec, total_sections);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
 		f2fs_msg(sb, KERN_INFO,
 			"Wrong segment_count / block_count (%u > %llu)",
 			segment_count, le64_to_cpu(raw_super->block_count));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if (secs_per_zone > total_sections || !secs_per_zone) {
 		f2fs_msg(sb, KERN_INFO,
 			"Wrong secs_per_zone / total_sections (%u, %u)",
 			secs_per_zone, total_sections);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
 			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2583,7 +2583,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 			le32_to_cpu(raw_super->extension_count),
 			raw_super->hot_ext_count,
 			F2FS_MAX_EXTENSION);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if (le32_to_cpu(raw_super->cp_payload) >
@@ -2592,7 +2592,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 			"Insane cp_payload (%u > %u)",
 			le32_to_cpu(raw_super->cp_payload),
 			blocks_per_seg - F2FS_CP_PACKS);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* check reserved ino info */
@@ -2604,12 +2604,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 			le32_to_cpu(raw_super->node_ino),
 			le32_to_cpu(raw_super->meta_ino),
 			le32_to_cpu(raw_super->root_ino));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
 	if (sanity_check_area_boundary(sbi, bh))
-		return 1;
+		return -EFSCORRUPTED;
 
 	return 0;
 }
@@ -2925,11 +2925,11 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
 		}
 
 		/* sanity checking of raw super */
-		if (sanity_check_raw_super(sbi, bh)) {
+		err = sanity_check_raw_super(sbi, bh);
+		if (err) {
 			f2fs_msg(sb, KERN_ERR,
 				"Can't find valid F2FS filesystem in %dth superblock",
 				block + 1);
-			err = -EFSCORRUPTED;
 			brelse(bh);
 			continue;
 		}
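
Returning a real errno instead of 1 lets the caller propagate the reason for the failure, and keeps -EINVAL ("no f2fs magic here") distinguishable from -EFSCORRUPTED ("f2fs, but damaged"). The resulting calling convention, simplified from the loop above:

        err = sanity_check_raw_super(sbi, bh);
        if (err) {
                /* err is now -EINVAL or -EFSCORRUPTED rather than a bare 1 */
                brelse(bh);
                continue;       /* try the next superblock copy */
        }
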
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 8f68181..f057c21 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -518,6 +518,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
 	rc = cuse_send_init(cc);
 	if (rc) {
 		fuse_dev_free(fud);
+		fuse_conn_put(&cc->fc);
 		return rc;
 	}
 	file->private_data = fud;
diff --git a/fs/libfs.c b/fs/libfs.c
index 0fb590d..bd2d193d 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -86,58 +86,47 @@ int dcache_dir_close(struct inode *inode, struct file *file)
 EXPORT_SYMBOL(dcache_dir_close);
 
 /* parent is locked at least shared */
-static struct dentry *next_positive(struct dentry *parent,
-				    struct list_head *from,
-				    int count)
+/*
+ * Returns an element of siblings' list.
+ * We are looking for <count>th positive after <p>; if
+ * found, dentry is grabbed and passed to caller via *<res>.
+ * If no such element exists, the anchor of the list is returned
+ * and *<res> is set to NULL.
+ */
+static struct list_head *scan_positives(struct dentry *cursor,
+					struct list_head *p,
+					loff_t count,
+					struct dentry **res)
 {
-	unsigned *seq = &parent->d_inode->i_dir_seq, n;
-	struct dentry *res;
-	struct list_head *p;
-	bool skipped;
-	int i;
+	struct dentry *dentry = cursor->d_parent, *found = NULL;
 
-retry:
-	i = count;
-	skipped = false;
-	n = smp_load_acquire(seq) & ~1;
-	res = NULL;
-	rcu_read_lock();
-	for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+	spin_lock(&dentry->d_lock);
+	while ((p = p->next) != &dentry->d_subdirs) {
 		struct dentry *d = list_entry(p, struct dentry, d_child);
-		if (!simple_positive(d)) {
-			skipped = true;
-		} else if (!--i) {
-			res = d;
-			break;
+		// we must at least skip cursors, to avoid livelocks
+		if (d->d_flags & DCACHE_DENTRY_CURSOR)
+			continue;
+		if (simple_positive(d) && !--count) {
+			spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+			if (simple_positive(d))
+				found = dget_dlock(d);
+			spin_unlock(&d->d_lock);
+			if (likely(found))
+				break;
+			count = 1;
+		}
+		if (need_resched()) {
+			list_move(&cursor->d_child, p);
+			p = &cursor->d_child;
+			spin_unlock(&dentry->d_lock);
+			cond_resched();
+			spin_lock(&dentry->d_lock);
 		}
 	}
-	rcu_read_unlock();
-	if (skipped) {
-		smp_rmb();
-		if (unlikely(*seq != n))
-			goto retry;
-	}
-	return res;
-}
-
-static void move_cursor(struct dentry *cursor, struct list_head *after)
-{
-	struct dentry *parent = cursor->d_parent;
-	unsigned n, *seq = &parent->d_inode->i_dir_seq;
-	spin_lock(&parent->d_lock);
-	for (;;) {
-		n = *seq;
-		if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
-			break;
-		cpu_relax();
-	}
-	__list_del(cursor->d_child.prev, cursor->d_child.next);
-	if (after)
-		list_add(&cursor->d_child, after);
-	else
-		list_add_tail(&cursor->d_child, &parent->d_subdirs);
-	smp_store_release(seq, n + 2);
-	spin_unlock(&parent->d_lock);
+	spin_unlock(&dentry->d_lock);
+	dput(*res);
+	*res = found;
+	return p;
 }
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
@@ -153,17 +142,28 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
 			return -EINVAL;
 	}
 	if (offset != file->f_pos) {
-		file->f_pos = offset;
-		if (file->f_pos >= 2) {
-			struct dentry *cursor = file->private_data;
-			struct dentry *to;
-			loff_t n = file->f_pos - 2;
+		struct dentry *cursor = file->private_data;
+		struct dentry *to = NULL;
+		struct list_head *p;
 
-			inode_lock_shared(dentry->d_inode);
-			to = next_positive(dentry, &dentry->d_subdirs, n);
-			move_cursor(cursor, to ? &to->d_child : NULL);
-			inode_unlock_shared(dentry->d_inode);
+		file->f_pos = offset;
+		inode_lock_shared(dentry->d_inode);
+
+		if (file->f_pos > 2) {
+			p = scan_positives(cursor, &dentry->d_subdirs,
+					   file->f_pos - 2, &to);
+			spin_lock(&dentry->d_lock);
+			list_move(&cursor->d_child, p);
+			spin_unlock(&dentry->d_lock);
+		} else {
+			spin_lock(&dentry->d_lock);
+			list_del_init(&cursor->d_child);
+			spin_unlock(&dentry->d_lock);
 		}
+
+		dput(to);
+
+		inode_unlock_shared(dentry->d_inode);
 	}
 	return offset;
 }
@@ -185,25 +185,29 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 {
 	struct dentry *dentry = file->f_path.dentry;
 	struct dentry *cursor = file->private_data;
-	struct list_head *p = &cursor->d_child;
-	struct dentry *next;
-	bool moved = false;
+	struct list_head *anchor = &dentry->d_subdirs;
+	struct dentry *next = NULL;
+	struct list_head *p;
 
 	if (!dir_emit_dots(file, ctx))
 		return 0;
 
 	if (ctx->pos == 2)
-		p = &dentry->d_subdirs;
-	while ((next = next_positive(dentry, p, 1)) != NULL) {
+		p = anchor;
+	else
+		p = &cursor->d_child;
+
+	while ((p = scan_positives(cursor, p, 1, &next)) != anchor) {
 		if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
 			      d_inode(next)->i_ino, dt_type(d_inode(next))))
 			break;
-		moved = true;
-		p = &next->d_child;
 		ctx->pos++;
 	}
-	if (moved)
-		move_cursor(cursor, p);
+	spin_lock(&dentry->d_lock);
+	list_move_tail(&cursor->d_child, p);
+	spin_unlock(&dentry->d_lock);
+	dput(next);
+
 	return 0;
 }
 EXPORT_SYMBOL(dcache_readdir);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f516ace..29b7033 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -122,32 +122,49 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 }
 
 static void
-nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+			    const struct nfs_pgio_header *hdr,
+			    ssize_t dreq_len)
 {
-	int i;
-	ssize_t count;
+	struct nfs_direct_mirror *mirror = &dreq->mirrors[hdr->pgio_mirror_idx];
 
-	WARN_ON_ONCE(dreq->count >= dreq->max_count);
+	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
+	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
+		return;
+	if (dreq->max_count >= dreq_len) {
+		dreq->max_count = dreq_len;
+		if (dreq->count > dreq_len)
+			dreq->count = dreq_len;
 
-	if (dreq->mirror_count == 1) {
-		dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
-		dreq->count += hdr->good_bytes;
-	} else {
-		/* mirrored writes */
-		count = dreq->mirrors[hdr->pgio_mirror_idx].count;
-		if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
-			count = hdr->io_start + hdr->good_bytes - dreq->io_start;
-			dreq->mirrors[hdr->pgio_mirror_idx].count = count;
-		}
-		/* update the dreq->count by finding the minimum agreed count from all
-		 * mirrors */
-		count = dreq->mirrors[0].count;
-
-		for (i = 1; i < dreq->mirror_count; i++)
-			count = min(count, dreq->mirrors[i].count);
-
-		dreq->count = count;
+		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+			dreq->error = hdr->error;
+		else /* Clear outstanding error if this is EOF */
+			dreq->error = 0;
 	}
+	if (mirror->count > dreq_len)
+		mirror->count = dreq_len;
+}
+
+static void
+nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+		       const struct nfs_pgio_header *hdr)
+{
+	struct nfs_direct_mirror *mirror = &dreq->mirrors[hdr->pgio_mirror_idx];
+	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
+	ssize_t dreq_len = 0;
+
+	if (hdr_end > dreq->io_start)
+		dreq_len = hdr_end - dreq->io_start;
+
+	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
+
+	if (dreq_len > dreq->max_count)
+		dreq_len = dreq->max_count;
+
+	if (mirror->count < dreq_len)
+		mirror->count = dreq_len;
+	if (dreq->count < dreq_len)
+		dreq->count = dreq_len;
 }
 
 /*
@@ -401,20 +418,12 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 	struct nfs_direct_req *dreq = hdr->dreq;
 
 	spin_lock(&dreq->lock);
-	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-		dreq->error = hdr->error;
-
 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 		spin_unlock(&dreq->lock);
 		goto out_put;
 	}
 
-	if (hdr->good_bytes != 0)
-		nfs_direct_good_bytes(dreq, hdr);
-
-	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
-		dreq->error = 0;
-
+	nfs_direct_count_bytes(dreq, hdr);
 	spin_unlock(&dreq->lock);
 
 	while (!list_empty(&hdr->pages)) {
@@ -651,6 +660,9 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 
 	dreq->count = 0;
+	dreq->max_count = 0;
+	list_for_each_entry(req, &reqs, wb_list)
+		dreq->max_count += req->wb_bytes;
 	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
 	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
 	for (i = 0; i < dreq->mirror_count; i++)
@@ -783,17 +795,13 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 
 	spin_lock(&dreq->lock);
-
-	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-		dreq->error = hdr->error;
-
 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 		spin_unlock(&dreq->lock);
 		goto out_put;
 	}
 
+	nfs_direct_count_bytes(dreq, hdr);
 	if (hdr->good_bytes != 0) {
-		nfs_direct_good_bytes(dreq, hdr);
 		if (nfs_write_need_commit(hdr)) {
 			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
 				request_commit = true;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index b7bde12..1c0227c 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1171,7 +1171,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
 		} else
 			*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
 	}
-	if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+	if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
 		*p++ = cpu_to_be32(label->lfs);
 		*p++ = cpu_to_be32(label->pi);
 		*p++ = cpu_to_be32(label->len);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 4931c3a..c818f98 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1426,10 +1426,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
 	const nfs4_stateid *res_stateid = NULL;
 	struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
 
-	if (ret == 0) {
-		arg_stateid = &args->stateid;
+	switch (ret) {
+	case -NFS4ERR_NOMATCHING_LAYOUT:
+		break;
+	case 0:
 		if (res->lrs_present)
 			res_stateid = &res->stateid;
+		/* Fallthrough */
+	default:
+		arg_stateid = &args->stateid;
 	}
 	pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
 			res_stateid);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bd34756..c492cbb 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -231,7 +231,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
 	/* At this point, we know that no more recovery threads can be
 	 * launched, so wait for any recovery completion work to
 	 * complete. */
-	flush_workqueue(osb->ocfs2_wq);
+	if (osb->ocfs2_wq)
+		flush_workqueue(osb->ocfs2_wq);
 
 	/*
 	 * Now that recovery is shut down, and the osb is about to be
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 3020823..a46aff7 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -391,7 +391,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
 	struct ocfs2_dinode *alloc = NULL;
 
 	cancel_delayed_work(&osb->la_enable_wq);
-	flush_workqueue(osb->ocfs2_wq);
+	if (osb->ocfs2_wq)
+		flush_workqueue(osb->ocfs2_wq);
 
 	if (osb->local_alloc_state == OCFS2_LA_UNUSED)
 		goto out;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index cad3970..d538123 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -552,7 +552,7 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
 	unsigned long totalpages = totalram_pages + total_swap_pages;
 	unsigned long points = 0;
 
-	points = oom_badness(task, NULL, NULL, totalpages) *
+	points = oom_badness(task, NULL, NULL, totalpages, false) *
 					1000 / totalpages;
 	seq_printf(m, "%lu\n", points);
 
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 792c78a..64293df 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 		return -EINVAL;
 
 	while (count > 0) {
-		if (pfn_valid(pfn))
-			ppage = pfn_to_page(pfn);
-		else
-			ppage = NULL;
+		/*
+		 * TODO: ZONE_DEVICE support requires identifying
+		 * memmaps that were actually initialized.
+		 */
+		ppage = pfn_to_online_page(pfn);
+
 		if (!ppage || PageSlab(ppage))
 			pcount = 0;
 		else
@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 		return -EINVAL;
 
 	while (count > 0) {
-		if (pfn_valid(pfn))
-			ppage = pfn_to_page(pfn);
-		else
-			ppage = NULL;
+		/*
+		 * TODO: ZONE_DEVICE support requires identifying
+		 * memmaps that were actually initialized.
+		 */
+		ppage = pfn_to_online_page(pfn);
 
 		if (put_user(stable_page_flags(ppage), out)) {
 			ret = -EFAULT;
@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
 		return -EINVAL;
 
 	while (count > 0) {
-		if (pfn_valid(pfn))
-			ppage = pfn_to_page(pfn);
-		else
-			ppage = NULL;
+		/*
+		 * TODO: ZONE_DEVICE support requires identifying
+		 * memmaps that were actually initialized.
+		 */
+		ppage = pfn_to_online_page(pfn);
 
 		if (ppage)
 			ino = page_cgroup_ino(ppage);
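
pfn_to_online_page() returns the struct page only for pfns in online memory sections and NULL otherwise, folding the old pfn_valid()/pfn_to_page() pair into one call that also skips memmaps that may never have been initialized. The shape of the replacement in isolation (value is just a placeholder for whatever the caller reports):

        struct page *page = pfn_to_online_page(pfn);

        if (!page)
                value = 0;      /* offline or invalid pfn: report nothing rather
                                 * than touch an uninitialized struct page */
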
diff --git a/fs/statfs.c b/fs/statfs.c
index f021662..56f655f 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -304,19 +304,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
 {
 	struct compat_statfs64 buf;
-	if (sizeof(ubuf->f_bsize) == 4) {
-		if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
-		     kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
-			return -EOVERFLOW;
-		/* f_files and f_ffree may be -1; it's okay
-		 * to stuff that into 32 bits */
-		if (kbuf->f_files != 0xffffffffffffffffULL
-		 && (kbuf->f_files & 0xffffffff00000000ULL))
-			return -EOVERFLOW;
-		if (kbuf->f_ffree != 0xffffffffffffffffULL
-		 && (kbuf->f_ffree & 0xffffffff00000000ULL))
-			return -EOVERFLOW;
-	}
+
+	if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+		return -EOVERFLOW;
+
 	memset(&buf, 0, sizeof(struct compat_statfs64));
 	buf.f_type = kbuf->f_type;
 	buf.f_bsize = kbuf->f_bsize;
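
The new check uses a common OR-and-mask idiom: OR the values together and test the upper 32 bits once; if any input has a bit set above bit 31, the combined value does too. In isolation, with a and b standing for any pair of u64 fields that must fit the 32-bit compat layout:

        if ((a | b) & 0xffffffff00000000ULL)
                return -EOVERFLOW;      /* at least one value will not fit in 32 bits */
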
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index c50ef7e..1d4ef06 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1472,8 +1472,11 @@ struct acpi_pptt_processor {
 
 /* Flags */
 
-#define ACPI_PPTT_PHYSICAL_PACKAGE          (1)	/* Physical package */
-#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID   (2)	/* ACPI Processor ID valid */
+#define ACPI_PPTT_PHYSICAL_PACKAGE          (1)
+#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID   (1<<1)
+#define ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD  (1<<2)	/* ACPI 6.3 */
+#define ACPI_PPTT_ACPI_LEAF_NODE            (1<<3)	/* ACPI 6.3 */
+#define ACPI_PPTT_ACPI_IDENTICAL            (1<<4)	/* ACPI 6.3 */
 
 /* 1: Cache Type Structure */
 
diff --git a/include/dt-bindings/clock/qcom,gcc-bengal.h b/include/dt-bindings/clock/qcom,gcc-bengal.h
index 28cde9e..4da52fc 100644
--- a/include/dt-bindings/clock/qcom,gcc-bengal.h
+++ b/include/dt-bindings/clock/qcom,gcc-bengal.h
@@ -174,6 +174,7 @@
 #define GCC_CAMSS_CPHY_1_CLK					166
 #define GCC_CAMSS_CPHY_2_CLK					167
 #define GCC_UFS_CLKREF_CLK					168
+#define GCC_DISP_GPLL0_CLK_SRC					169
 
 /* GCC resets */
 #define GCC_QUSB2PHY_PRIM_BCR					0
@@ -184,5 +185,7 @@
 #define GCC_VCODEC0_BCR						6
 #define GCC_VENUS_BCR						7
 #define GCC_VIDEO_INTERFACE_BCR					8
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR				9
+#define GCC_USB3_PHY_PRIM_SP0_BCR				10
 
 #endif
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index f47edbd..38abbee 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -222,4 +222,22 @@
 
 #define ADC_MAX_CHANNEL				0xc0
 
+/* VADC scale function index */
+#define ADC_SCALE_DEFAULT			0x0
+#define ADC_SCALE_THERM_100K_PULLUP		0x1
+#define ADC_SCALE_PMIC_THERM			0x2
+#define ADC_SCALE_XOTHERM			0x3
+#define ADC_SCALE_PMI_CHG_TEMP			0x4
+#define ADC_SCALE_HW_CALIB_DEFAULT		0x5
+#define ADC_SCALE_HW_CALIB_THERM_100K_PULLUP	0x6
+#define ADC_SCALE_HW_CALIB_XOTHERM		0x7
+#define ADC_SCALE_HW_CALIB_PMIC_THERM		0x8
+#define ADC_SCALE_HW_CALIB_CUR			0x9
+#define ADC_SCALE_HW_CALIB_PM5_CHG_TEMP		0xA
+#define ADC_SCALE_HW_CALIB_PM5_SMB_TEMP		0xB
+#define ADC_SCALE_HW_CALIB_BATT_THERM_100K	0xC
+#define ADC_SCALE_HW_CALIB_BATT_THERM_30K	0xD
+#define ADC_SCALE_HW_CALIB_BATT_THERM_400K	0xE
+#define ADC_SCALE_HW_CALIB_PM5_SMB1398_TEMP	0xF
+
 #endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */
diff --git a/include/dt-bindings/phy/qcom,usb3-11nm-qmp-combo.h b/include/dt-bindings/phy/qcom,usb3-11nm-qmp-combo.h
new file mode 100644
index 0000000..47a4a78
--- /dev/null
+++ b/include/dt-bindings/phy/qcom,usb3-11nm-qmp-combo.h
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_PHY_QCOM_11NM_QMP_COMBO_USB_H
+#define _DT_BINDINGS_PHY_QCOM_11NM_QMP_COMBO_USB_H
+
+#define USB3PHY_QSERDES_COM_ATB_SEL1				0x0000
+#define USB3PHY_QSERDES_COM_ATB_SEL2				0x0004
+#define USB3PHY_QSERDES_COM_FREQ_UPDATE				0x0008
+#define USB3PHY_QSERDES_COM_BG_TIMER				0x000C
+#define USB3PHY_QSERDES_COM_SSC_EN_CENTER			0x0010
+#define USB3PHY_QSERDES_COM_SSC_ADJ_PER1			0x0014
+#define USB3PHY_QSERDES_COM_SSC_ADJ_PER2			0x0018
+#define USB3PHY_QSERDES_COM_SSC_PER1				0x001C
+#define USB3PHY_QSERDES_COM_SSC_PER2				0x0020
+#define USB3PHY_QSERDES_COM_SSC_STEP_SIZE1			0x0024
+#define USB3PHY_QSERDES_COM_SSC_STEP_SIZE2			0x0028
+#define USB3PHY_QSERDES_COM_POST_DIV				0x002C
+#define USB3PHY_QSERDES_COM_POST_DIV_MUX			0x0030
+#define USB3PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN			0x0034
+#define USB3PHY_QSERDES_COM_CLK_ENABLE1				0x0038
+#define USB3PHY_QSERDES_COM_SYS_CLK_CTRL			0x003C
+#define USB3PHY_QSERDES_COM_SYSCLK_BUF_ENABLE			0x0040
+#define USB3PHY_QSERDES_COM_PLL_EN				0x0044
+#define USB3PHY_QSERDES_COM_PLL_IVCO				0x0048
+#define USB3PHY_QSERDES_COM_LOCK_CMP1_MODE0			0x004C
+#define USB3PHY_QSERDES_COM_LOCK_CMP2_MODE0			0x0050
+#define USB3PHY_QSERDES_COM_LOCK_CMP3_MODE0			0x0054
+#define USB3PHY_QSERDES_COM_LOCK_CMP1_MODE1			0x0058
+#define USB3PHY_QSERDES_COM_LOCK_CMP2_MODE1			0x005C
+#define USB3PHY_QSERDES_COM_LOCK_CMP3_MODE1			0x0060
+#define USB3PHY_QSERDES_COM_CMN_RSVD0				0x0064
+#define USB3PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL		0x0068
+#define USB3PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS		0x006C
+#define USB3PHY_QSERDES_COM_BG_TRIM				0x0070
+#define USB3PHY_QSERDES_COM_CLK_EP_DIV				0x0074
+#define USB3PHY_QSERDES_COM_CP_CTRL_MODE0			0x0078
+#define USB3PHY_QSERDES_COM_CP_CTRL_MODE1			0x007C
+#define USB3PHY_QSERDES_COM_CMN_RSVD1				0x0080
+#define USB3PHY_QSERDES_COM_PLL_RCTRL_MODE0			0x0084
+#define USB3PHY_QSERDES_COM_PLL_RCTRL_MODE1			0x0088
+#define USB3PHY_QSERDES_COM_CMN_RSVD2				0x008C
+#define USB3PHY_QSERDES_COM_PLL_CCTRL_MODE0			0x0090
+#define USB3PHY_QSERDES_COM_PLL_CCTRL_MODE1			0x0094
+#define USB3PHY_QSERDES_COM_CMN_RSVD3				0x0098
+#define USB3PHY_QSERDES_COM_PLL_CNTRL				0x009C
+#define USB3PHY_QSERDES_COM_PHASE_SEL_CTRL			0x00A0
+#define USB3PHY_QSERDES_COM_PHASE_SEL_DC			0x00A4
+#define USB3PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM			0x00A8
+#define USB3PHY_QSERDES_COM_SYSCLK_EN_SEL			0x00AC
+#define USB3PHY_QSERDES_COM_CML_SYSCLK_SEL			0x00B0
+#define USB3PHY_QSERDES_COM_RESETSM_CNTRL			0x00B4
+#define USB3PHY_QSERDES_COM_RESETSM_CNTRL2			0x00B8
+#define USB3PHY_QSERDES_COM_RESTRIM_CTRL			0x00BC
+#define USB3PHY_QSERDES_COM_RESTRIM_CTRL2			0x00C0
+#define USB3PHY_QSERDES_COM_RESCODE_DIV_NUM			0x00C4
+#define USB3PHY_QSERDES_COM_LOCK_CMP_EN				0x00C8
+#define USB3PHY_QSERDES_COM_LOCK_CMP_CFG			0x00CC
+#define USB3PHY_QSERDES_COM_DEC_START_MODE0			0x00D0
+#define USB3PHY_QSERDES_COM_DEC_START_MODE1			0x00D4
+#define USB3PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL			0x00D8
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START1_MODE0		0x00DC
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START2_MODE0		0x00E0
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START3_MODE0		0x00E4
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START1_MODE1		0x00E8
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START2_MODE1		0x00EC
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START3_MODE1		0x00F0
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MINVAL1			0x00F4
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MINVAL2			0x00F8
+#define USB3PHY_QSERDES_COM_CMN_RSVD4				0x00FC
+#define USB3PHY_QSERDES_COM_INTEGLOOP_INITVAL			0x0100
+#define USB3PHY_QSERDES_COM_INTEGLOOP_EN			0x0104
+#define USB3PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0		0x0108
+#define USB3PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0		0x010C
+#define USB3PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1		0x0110
+#define USB3PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1		0x0114
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MAXVAL1			0x0118
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MAXVAL2			0x011C
+#define USB3PHY_QSERDES_COM_RES_TRIM_CONTROL2			0x0120
+#define USB3PHY_QSERDES_COM_VCO_TUNE_CTRL			0x0124
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MAP			0x0128
+#define USB3PHY_QSERDES_COM_VCO_TUNE1_MODE0			0x012C
+#define USB3PHY_QSERDES_COM_VCO_TUNE2_MODE0			0x0130
+#define USB3PHY_QSERDES_COM_VCO_TUNE1_MODE1			0x0134
+#define USB3PHY_QSERDES_COM_VCO_TUNE2_MODE1			0x0138
+#define USB3PHY_QSERDES_COM_VCO_TUNE_INITVAL1			0x013C
+#define USB3PHY_QSERDES_COM_VCO_TUNE_INITVAL2			0x0140
+#define USB3PHY_QSERDES_COM_VCO_TUNE_TIMER1			0x0144
+#define USB3PHY_QSERDES_COM_VCO_TUNE_TIMER2			0x0148
+#define USB3PHY_QSERDES_COM_SAR					0x014C
+#define USB3PHY_QSERDES_COM_SAR_CLK				0x0150
+#define USB3PHY_QSERDES_COM_SAR_CODE_OUT_STATUS			0x0154
+#define USB3PHY_QSERDES_COM_SAR_CODE_READY_STATUS		0x0158
+#define USB3PHY_QSERDES_COM_CMN_STATUS				0x015C
+#define USB3PHY_QSERDES_COM_RESET_SM_STATUS			0x0160
+#define USB3PHY_QSERDES_COM_RESTRIM_CODE_STATUS			0x0164
+#define USB3PHY_QSERDES_COM_PLLCAL_CODE1_STATUS			0x0168
+#define USB3PHY_QSERDES_COM_PLLCAL_CODE2_STATUS			0x016C
+#define USB3PHY_QSERDES_COM_BG_CTRL				0x0170
+#define USB3PHY_QSERDES_COM_CLK_SELECT				0x0174
+#define USB3PHY_QSERDES_COM_HSCLK_SEL				0x0178
+#define USB3PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS		0x017C
+#define USB3PHY_QSERDES_COM_PLL_ANALOG				0x0180
+#define USB3PHY_QSERDES_COM_CORECLK_DIV				0x0184
+#define USB3PHY_QSERDES_COM_SW_RESET				0x0188
+#define USB3PHY_QSERDES_COM_CORE_CLK_EN				0x018C
+#define USB3PHY_QSERDES_COM_C_READY_STATUS			0x0190
+#define USB3PHY_QSERDES_COM_CMN_CONFIG				0x0194
+#define USB3PHY_QSERDES_COM_CMN_RATE_OVERRIDE			0x0198
+#define USB3PHY_QSERDES_COM_SVS_MODE_CLK_SEL			0x019C
+#define USB3PHY_QSERDES_COM_DEBUG_BUS0				0x01A0
+#define USB3PHY_QSERDES_COM_DEBUG_BUS1				0x01A4
+#define USB3PHY_QSERDES_COM_DEBUG_BUS2				0x01A8
+#define USB3PHY_QSERDES_COM_DEBUG_BUS3				0x01AC
+#define USB3PHY_QSERDES_COM_DEBUG_BUS_SEL			0x01B0
+#define USB3PHY_QSERDES_COM_CMN_MISC1				0x01B4
+#define USB3PHY_QSERDES_COM_CMN_MISC2				0x01B8
+#define USB3PHY_QSERDES_COM_CORECLK_DIV_MODE1			0x01BC
+#define USB3PHY_QSERDES_COM_CMN_RSVD5				0x01C0
+#define USB3PHY_QSERDES_TXA_BIST_MODE_LANENO			0x0200
+#define USB3PHY_QSERDES_TXA_BIST_INVERT				0x0204
+#define USB3PHY_QSERDES_TXA_CLKBUF_ENABLE			0x0208
+#define USB3PHY_QSERDES_TXA_TX_EMP_POST1_LVL			0x020C
+#define USB3PHY_QSERDES_TXA_TX_POST2_EMPH			0x0210
+#define USB3PHY_QSERDES_TXA_TX_BOOST_LVL_UP_DN			0x0214
+#define USB3PHY_QSERDES_TXA_TX_IDLE_LVL_LARGE_AMP		0x0218
+#define USB3PHY_QSERDES_TXA_TX_DRV_LVL				0x021C
+#define USB3PHY_QSERDES_TXA_TX_DRV_LVL_OFFSET			0x0220
+#define USB3PHY_QSERDES_TXA_RESET_TSYNC_EN			0x0224
+#define USB3PHY_QSERDES_TXA_PRE_STALL_LDO_BOOST_EN		0x0228
+#define USB3PHY_QSERDES_TXA_TX_BAND				0x022C
+#define USB3PHY_QSERDES_TXA_SLEW_CNTL				0x0230
+#define USB3PHY_QSERDES_TXA_INTERFACE_SELECT			0x0234
+#define USB3PHY_QSERDES_TXA_LPB_EN				0x0238
+#define USB3PHY_QSERDES_TXA_RES_CODE_LANE_TX			0x023C
+#define USB3PHY_QSERDES_TXA_RES_CODE_LANE_RX			0x0240
+#define USB3PHY_QSERDES_TXA_RES_CODE_LANE_OFFSET_TX		0x0244
+#define USB3PHY_QSERDES_TXA_RES_CODE_LANE_OFFSET_RX		0x0248
+#define USB3PHY_QSERDES_TXA_PERL_LENGTH1			0x024C
+#define USB3PHY_QSERDES_TXA_PERL_LENGTH2			0x0250
+#define USB3PHY_QSERDES_TXA_SERDES_BYP_EN_OUT			0x0254
+#define USB3PHY_QSERDES_TXA_DEBUG_BUS_SEL			0x0258
+#define USB3PHY_QSERDES_TXA_TRANSCEIVER_BIAS_EN			0x025C
+#define USB3PHY_QSERDES_TXA_HIGHZ_DRVR_EN			0x0260
+#define USB3PHY_QSERDES_TXA_TX_POL_INV				0x0264
+#define USB3PHY_QSERDES_TXA_PARRATE_REC_DETECT_IDLE_EN		0x0268
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN1			0x026C
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN2			0x0270
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN3			0x0274
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN4			0x0278
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN5			0x027C
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN6			0x0280
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN7			0x0284
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN8			0x0288
+#define USB3PHY_QSERDES_TXA_LANE_MODE_1				0x028C
+#define USB3PHY_QSERDES_TXA_LANE_MODE_2				0x0290
+#define USB3PHY_QSERDES_TXA_LANE_MODE_3				0x0294
+#define USB3PHY_QSERDES_TXA_ATB_SEL1				0x0298
+#define USB3PHY_QSERDES_TXA_ATB_SEL2				0x029C
+#define USB3PHY_QSERDES_TXA_RCV_DETECT_LVL			0x02A0
+#define USB3PHY_QSERDES_TXA_RCV_DETECT_LVL_2			0x02A4
+#define USB3PHY_QSERDES_TXA_PRBS_SEED1				0x02A8
+#define USB3PHY_QSERDES_TXA_PRBS_SEED2				0x02AC
+#define USB3PHY_QSERDES_TXA_PRBS_SEED3				0x02B0
+#define USB3PHY_QSERDES_TXA_PRBS_SEED4				0x02B4
+#define USB3PHY_QSERDES_TXA_RESET_GEN				0x02B8
+#define USB3PHY_QSERDES_TXA_RESET_GEN_MUXES			0x02BC
+#define USB3PHY_QSERDES_TXA_TRAN_DRVR_EMP_EN			0x02C0
+#define USB3PHY_QSERDES_TXA_TX_INTERFACE_MODE			0x02C4
+#define USB3PHY_QSERDES_TXA_PWM_CTRL				0x02C8
+#define USB3PHY_QSERDES_TXA_PWM_ENCODED_OR_DATA			0x02CC
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_1_DIVIDER_BAND2		0x02D0
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_2_DIVIDER_BAND2		0x02D4
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_3_DIVIDER_BAND2		0x02D8
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_4_DIVIDER_BAND2		0x02DC
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_1_DIVIDER_BAND0_1		0x02E0
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_2_DIVIDER_BAND0_1		0x02E4
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_3_DIVIDER_BAND0_1		0x02E8
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_4_DIVIDER_BAND0_1		0x02EC
+#define USB3PHY_QSERDES_TXA_VMODE_CTRL1				0x02F0
+#define USB3PHY_QSERDES_TXA_ALOG_OBSV_BUS_CTRL_1		0x02F4
+#define USB3PHY_QSERDES_TXA_BIST_STATUS				0x02F8
+#define USB3PHY_QSERDES_TXA_BIST_ERROR_COUNT1			0x02FC
+#define USB3PHY_QSERDES_TXA_BIST_ERROR_COUNT2			0x0300
+#define USB3PHY_QSERDES_TXA_ALOG_OBSV_BUS_STATUS_1		0x0304
+#define USB3PHY_QSERDES_TXA_DIG_BKUP_CTRL			0x0308
+#define USB3PHY_QSERDES_RXA_UCDR_FO_GAIN_HALF			0x0400
+#define USB3PHY_QSERDES_RXA_UCDR_FO_GAIN_QUARTER		0x0404
+#define USB3PHY_QSERDES_RXA_UCDR_FO_GAIN			0x0408
+#define USB3PHY_QSERDES_RXA_UCDR_SO_GAIN_HALF			0x040C
+#define USB3PHY_QSERDES_RXA_UCDR_SO_GAIN_QUARTER		0x0410
+#define USB3PHY_QSERDES_RXA_UCDR_SO_GAIN			0x0414
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_FO_GAIN_HALF		0x0418
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_FO_GAIN_QUARTER		0x041C
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_FO_GAIN			0x0420
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_SO_GAIN_HALF		0x0424
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_SO_GAIN_QUARTER		0x0428
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_SO_GAIN			0x042C
+#define USB3PHY_QSERDES_RXA_UCDR_FASTLOCK_FO_GAIN		0x0430
+#define USB3PHY_QSERDES_RXA_UCDR_SO_SATURATION_AND_ENABLE	0x0434
+#define USB3PHY_QSERDES_RXA_UCDR_FO_TO_SO_DELAY			0x0438
+#define USB3PHY_QSERDES_RXA_UCDR_FASTLOCK_COUNT_LOW		0x043C
+#define USB3PHY_QSERDES_RXA_UCDR_FASTLOCK_COUNT_HIGH		0x0440
+#define USB3PHY_QSERDES_RXA_UCDR_PI_CONTROLS			0x0444
+#define USB3PHY_QSERDES_RXA_UCDR_SB2_THRESH1			0x0448
+#define USB3PHY_QSERDES_RXA_UCDR_SB2_THRESH2			0x044C
+#define USB3PHY_QSERDES_RXA_UCDR_SB2_GAIN1			0x0450
+#define USB3PHY_QSERDES_RXA_UCDR_SB2_GAIN2			0x0454
+#define USB3PHY_QSERDES_RXA_AUX_CONTROL				0x0458
+#define USB3PHY_QSERDES_RXA_AUX_DATA_TCOARSE_TFINE		0x045C
+#define USB3PHY_QSERDES_RXA_RCLK_AUXDATA_SEL			0x0460
+#define USB3PHY_QSERDES_RXA_AC_JTAG_ENABLE			0x0464
+#define USB3PHY_QSERDES_RXA_AC_JTAG_INITP			0x0468
+#define USB3PHY_QSERDES_RXA_AC_JTAG_INITN			0x046C
+#define USB3PHY_QSERDES_RXA_AC_JTAG_LVL				0x0470
+#define USB3PHY_QSERDES_RXA_AC_JTAG_MODE			0x0474
+#define USB3PHY_QSERDES_RXA_AC_JTAG_RESET			0x0478
+#define USB3PHY_QSERDES_RXA_RX_TERM_BW				0x047C
+#define USB3PHY_QSERDES_RXA_RX_RCVR_IQ_EN			0x0480
+#define USB3PHY_QSERDES_RXA_RX_IDAC_I_DC_OFFSETS		0x0484
+#define USB3PHY_QSERDES_RXA_RX_IDAC_IBAR_DC_OFFSETS		0x0488
+#define USB3PHY_QSERDES_RXA_RX_IDAC_Q_DC_OFFSETS		0x048C
+#define USB3PHY_QSERDES_RXA_RX_IDAC_QBAR_DC_OFFSETS		0x0490
+#define USB3PHY_QSERDES_RXA_RX_IDAC_A_DC_OFFSETS		0x0494
+#define USB3PHY_QSERDES_RXA_RX_IDAC_ABAR_DC_OFFSETS		0x0498
+#define USB3PHY_QSERDES_RXA_RX_IDAC_EN				0x049C
+#define USB3PHY_QSERDES_RXA_RX_IDAC_ENABLES			0x04A0
+#define USB3PHY_QSERDES_RXA_RX_IDAC_SIGN			0x04A4
+#define USB3PHY_QSERDES_RXA_RX_HIGHZ_HIGHRATE			0x04A8
+#define USB3PHY_QSERDES_RXA_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET	0x04AC
+#define USB3PHY_QSERDES_RXA_DFE_1				0x04B0
+#define USB3PHY_QSERDES_RXA_DFE_2				0x04B4
+#define USB3PHY_QSERDES_RXA_DFE_3				0x04B8
+#define USB3PHY_QSERDES_RXA_VGA_CAL_CNTRL1			0x04BC
+#define USB3PHY_QSERDES_RXA_VGA_CAL_CNTRL2			0x04C0
+#define USB3PHY_QSERDES_RXA_GM_CAL				0x04C4
+#define USB3PHY_QSERDES_RXA_RX_EQ_GAIN2_LSB			0x04C8
+#define USB3PHY_QSERDES_RXA_RX_EQ_GAIN2_MSB			0x04CC
+#define USB3PHY_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL1		0x04D0
+#define USB3PHY_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL2		0x04D4
+#define USB3PHY_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL3		0x04D8
+#define USB3PHY_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL4		0x04DC
+#define USB3PHY_QSERDES_RXA_RX_IDAC_TSETTLE_LOW			0x04E0
+#define USB3PHY_QSERDES_RXA_RX_IDAC_TSETTLE_HIGH		0x04E4
+#define USB3PHY_QSERDES_RXA_RX_IDAC_MEASURE_TIME		0x04E8
+#define USB3PHY_QSERDES_RXA_RX_IDAC_ACCUMULATOR			0x04EC
+#define USB3PHY_QSERDES_RXA_RX_EQ_OFFSET_LSB			0x04F0
+#define USB3PHY_QSERDES_RXA_RX_EQ_OFFSET_MSB			0x04F4
+#define USB3PHY_QSERDES_RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1		0x04F8
+#define USB3PHY_QSERDES_RXA_RX_OFFSET_ADAPTOR_CNTRL2		0x04FC
+#define USB3PHY_QSERDES_RXA_SIGDET_ENABLES			0x0500
+#define USB3PHY_QSERDES_RXA_SIGDET_CNTRL			0x0504
+#define USB3PHY_QSERDES_RXA_SIGDET_LVL				0x0508
+#define USB3PHY_QSERDES_RXA_SIGDET_DEGLITCH_CNTRL		0x050C
+#define USB3PHY_QSERDES_RXA_RX_BAND				0x0510
+#define USB3PHY_QSERDES_RXA_CDR_FREEZE_UP_DN			0x0514
+#define USB3PHY_QSERDES_RXA_CDR_RESET_OVERRIDE			0x0518
+#define USB3PHY_QSERDES_RXA_RX_INTERFACE_MODE			0x051C
+#define USB3PHY_QSERDES_RXA_JITTER_GEN_MODE			0x0520
+#define USB3PHY_QSERDES_RXA_BUJ_AMP				0x0524
+#define USB3PHY_QSERDES_RXA_SJ_AMP1				0x0528
+#define USB3PHY_QSERDES_RXA_SJ_AMP2				0x052C
+#define USB3PHY_QSERDES_RXA_SJ_PER1				0x0530
+#define USB3PHY_QSERDES_RXA_SJ_PER2				0x0534
+#define USB3PHY_QSERDES_RXA_BUJ_STEP_FREQ1			0x0538
+#define USB3PHY_QSERDES_RXA_BUJ_STEP_FREQ2			0x053C
+#define USB3PHY_QSERDES_RXA_PPM_OFFSET1				0x0540
+#define USB3PHY_QSERDES_RXA_PPM_OFFSET2				0x0544
+#define USB3PHY_QSERDES_RXA_SIGN_PPM_PERIOD1			0x0548
+#define USB3PHY_QSERDES_RXA_SIGN_PPM_PERIOD2			0x054C
+#define USB3PHY_QSERDES_RXA_RX_PWM_ENABLE_AND_DATA		0x0550
+#define USB3PHY_QSERDES_RXA_RX_PWM_GEAR1_TIMEOUT_COUNT		0x0554
+#define USB3PHY_QSERDES_RXA_RX_PWM_GEAR2_TIMEOUT_COUNT		0x0558
+#define USB3PHY_QSERDES_RXA_RX_PWM_GEAR3_TIMEOUT_COUNT		0x055C
+#define USB3PHY_QSERDES_RXA_RX_PWM_GEAR4_TIMEOUT_COUNT		0x0560
+#define USB3PHY_QSERDES_RXA_RX_MODE_00				0x0564
+#define USB3PHY_QSERDES_RXA_RX_MODE_01				0x0568
+#define USB3PHY_QSERDES_RXA_RX_MODE_10				0x056C
+#define USB3PHY_QSERDES_RXA_ALOG_OBSV_BUS_CTRL_1		0x0570
+#define USB3PHY_QSERDES_RXA_PI_CTRL1				0x0574
+#define USB3PHY_QSERDES_RXA_PI_CTRL2				0x0578
+#define USB3PHY_QSERDES_RXA_PI_QUAD				0x057C
+#define USB3PHY_QSERDES_RXA_IDATA1				0x0580
+#define USB3PHY_QSERDES_RXA_IDATA2				0x0584
+#define USB3PHY_QSERDES_RXA_AUX_DATA1				0x0588
+#define USB3PHY_QSERDES_RXA_AUX_DATA2				0x058C
+#define USB3PHY_QSERDES_RXA_AC_JTAG_OUTP			0x0590
+#define USB3PHY_QSERDES_RXA_AC_JTAG_OUTN			0x0594
+#define USB3PHY_QSERDES_RXA_RX_SIGDET				0x0598
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_I			0x059C
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_IBAR			0x05A0
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_Q			0x05A4
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_QBAR			0x05A8
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_A			0x05AC
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_ABAR			0x05B0
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_SM_ON			0x05B4
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_CAL_DONE		0x05B8
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_SIGNERROR		0x05BC
+#define USB3PHY_QSERDES_RXA_READ_EQCODE				0x05C0
+#define USB3PHY_QSERDES_RXA_READ_OFFSETCODE			0x05C4
+#define USB3PHY_QSERDES_RXA_IA_ERROR_COUNTER_LOW		0x05C8
+#define USB3PHY_QSERDES_RXA_IA_ERROR_COUNTER_HIGH		0x05CC
+#define USB3PHY_QSERDES_RXA_VGA_READ_CODE			0x05D0
+#define USB3PHY_QSERDES_RXA_DFE_TAP1_READ_CODE			0x05D4
+#define USB3PHY_QSERDES_RXA_DFE_TAP2_READ_CODE			0x05D8
+#define USB3PHY_QSERDES_RXA_ALOG_OBSV_BUS_STATUS_1		0x05DC
+#define USB3PHY_QSERDES_TXB_BIST_MODE_LANENO			0x0600
+#define USB3PHY_QSERDES_TXB_BIST_INVERT				0x0604
+#define USB3PHY_QSERDES_TXB_CLKBUF_ENABLE			0x0608
+#define USB3PHY_QSERDES_TXB_TX_EMP_POST1_LVL			0x060C
+#define USB3PHY_QSERDES_TXB_TX_POST2_EMPH			0x0610
+#define USB3PHY_QSERDES_TXB_TX_BOOST_LVL_UP_DN			0x0614
+#define USB3PHY_QSERDES_TXB_TX_IDLE_LVL_LARGE_AMP		0x0618
+#define USB3PHY_QSERDES_TXB_TX_DRV_LVL				0x061C
+#define USB3PHY_QSERDES_TXB_TX_DRV_LVL_OFFSET			0x0620
+#define USB3PHY_QSERDES_TXB_RESET_TSYNC_EN			0x0624
+#define USB3PHY_QSERDES_TXB_PRE_STALL_LDO_BOOST_EN		0x0628
+#define USB3PHY_QSERDES_TXB_TX_BAND				0x062C
+#define USB3PHY_QSERDES_TXB_SLEW_CNTL				0x0630
+#define USB3PHY_QSERDES_TXB_INTERFACE_SELECT			0x0634
+#define USB3PHY_QSERDES_TXB_LPB_EN				0x0638
+#define USB3PHY_QSERDES_TXB_RES_CODE_LANE_TX			0x063C
+#define USB3PHY_QSERDES_TXB_RES_CODE_LANE_RX			0x0640
+#define USB3PHY_QSERDES_TXB_RES_CODE_LANE_OFFSET_TX		0x0644
+#define USB3PHY_QSERDES_TXB_RES_CODE_LANE_OFFSET_RX		0x0648
+#define USB3PHY_QSERDES_TXB_PERL_LENGTH1			0x064C
+#define USB3PHY_QSERDES_TXB_PERL_LENGTH2			0x0650
+#define USB3PHY_QSERDES_TXB_SERDES_BYP_EN_OUT			0x0654
+#define USB3PHY_QSERDES_TXB_DEBUG_BUS_SEL			0x0658
+#define USB3PHY_QSERDES_TXB_TRANSCEIVER_BIAS_EN			0x065C
+#define USB3PHY_QSERDES_TXB_HIGHZ_DRVR_EN			0x0660
+#define USB3PHY_QSERDES_TXB_TX_POL_INV				0x0664
+#define USB3PHY_QSERDES_TXB_PARRATE_REC_DETECT_IDLE_EN		0x0668
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN1			0x066C
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN2			0x0670
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN3			0x0674
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN4			0x0678
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN5			0x067C
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN6			0x0680
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN7			0x0684
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN8			0x0688
+#define USB3PHY_QSERDES_TXB_LANE_MODE_1				0x068C
+#define USB3PHY_QSERDES_TXB_LANE_MODE_2				0x0690
+#define USB3PHY_QSERDES_TXB_LANE_MODE_3				0x0694
+#define USB3PHY_QSERDES_TXB_ATB_SEL1				0x0698
+#define USB3PHY_QSERDES_TXB_ATB_SEL2				0x069C
+#define USB3PHY_QSERDES_TXB_RCV_DETECT_LVL			0x06A0
+#define USB3PHY_QSERDES_TXB_RCV_DETECT_LVL_2			0x06A4
+#define USB3PHY_QSERDES_TXB_PRBS_SEED1				0x06A8
+#define USB3PHY_QSERDES_TXB_PRBS_SEED2				0x06AC
+#define USB3PHY_QSERDES_TXB_PRBS_SEED3				0x06B0
+#define USB3PHY_QSERDES_TXB_PRBS_SEED4				0x06B4
+#define USB3PHY_QSERDES_TXB_RESET_GEN				0x06B8
+#define USB3PHY_QSERDES_TXB_RESET_GEN_MUXES			0x06BC
+#define USB3PHY_QSERDES_TXB_TRAN_DRVR_EMP_EN			0x06C0
+#define USB3PHY_QSERDES_TXB_TX_INTERFACE_MODE			0x06C4
+#define USB3PHY_QSERDES_TXB_PWM_CTRL				0x06C8
+#define USB3PHY_QSERDES_TXB_PWM_ENCODED_OR_DATA			0x06CC
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_1_DIVIDER_BAND2		0x06D0
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_2_DIVIDER_BAND2		0x06D4
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_3_DIVIDER_BAND2		0x06D8
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_4_DIVIDER_BAND2		0x06DC
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_1_DIVIDER_BAND0_1		0x06E0
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_2_DIVIDER_BAND0_1		0x06E4
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_3_DIVIDER_BAND0_1		0x06E8
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_4_DIVIDER_BAND0_1		0x06EC
+#define USB3PHY_QSERDES_TXB_VMODE_CTRL1				0x06F0
+#define USB3PHY_QSERDES_TXB_ALOG_OBSV_BUS_CTRL_1		0x06F4
+#define USB3PHY_QSERDES_TXB_BIST_STATUS				0x06F8
+#define USB3PHY_QSERDES_TXB_BIST_ERROR_COUNT1			0x06FC
+#define USB3PHY_QSERDES_TXB_BIST_ERROR_COUNT2			0x0700
+#define USB3PHY_QSERDES_TXB_ALOG_OBSV_BUS_STATUS_1		0x0704
+#define USB3PHY_QSERDES_TXB_DIG_BKUP_CTRL			0x0708
+#define USB3PHY_QSERDES_RXB_UCDR_FO_GAIN_HALF			0x0800
+#define USB3PHY_QSERDES_RXB_UCDR_FO_GAIN_QUARTER		0x0804
+#define USB3PHY_QSERDES_RXB_UCDR_FO_GAIN			0x0808
+#define USB3PHY_QSERDES_RXB_UCDR_SO_GAIN_HALF			0x080C
+#define USB3PHY_QSERDES_RXB_UCDR_SO_GAIN_QUARTER		0x0810
+#define USB3PHY_QSERDES_RXB_UCDR_SO_GAIN			0x0814
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_FO_GAIN_HALF		0x0818
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_FO_GAIN_QUARTER		0x081C
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_FO_GAIN			0x0820
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_SO_GAIN_HALF		0x0824
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_SO_GAIN_QUARTER		0x0828
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_SO_GAIN			0x082C
+#define USB3PHY_QSERDES_RXB_UCDR_FASTLOCK_FO_GAIN		0x0830
+#define USB3PHY_QSERDES_RXB_UCDR_SO_SATURATION_AND_ENABLE	0x0834
+#define USB3PHY_QSERDES_RXB_UCDR_FO_TO_SO_DELAY			0x0838
+#define USB3PHY_QSERDES_RXB_UCDR_FASTLOCK_COUNT_LOW		0x083C
+#define USB3PHY_QSERDES_RXB_UCDR_FASTLOCK_COUNT_HIGH		0x0840
+#define USB3PHY_QSERDES_RXB_UCDR_PI_CONTROLS			0x0844
+#define USB3PHY_QSERDES_RXB_UCDR_SB2_THRESH1			0x0848
+#define USB3PHY_QSERDES_RXB_UCDR_SB2_THRESH2			0x084C
+#define USB3PHY_QSERDES_RXB_UCDR_SB2_GAIN1			0x0850
+#define USB3PHY_QSERDES_RXB_UCDR_SB2_GAIN2			0x0854
+#define USB3PHY_QSERDES_RXB_AUX_CONTROL				0x0858
+#define USB3PHY_QSERDES_RXB_AUX_DATA_TCOARSE_TFINE		0x085C
+#define USB3PHY_QSERDES_RXB_RCLK_AUXDATA_SEL			0x0860
+#define USB3PHY_QSERDES_RXB_AC_JTAG_ENABLE			0x0864
+#define USB3PHY_QSERDES_RXB_AC_JTAG_INITP			0x0868
+#define USB3PHY_QSERDES_RXB_AC_JTAG_INITN			0x086C
+#define USB3PHY_QSERDES_RXB_AC_JTAG_LVL				0x0870
+#define USB3PHY_QSERDES_RXB_AC_JTAG_MODE			0x0874
+#define USB3PHY_QSERDES_RXB_AC_JTAG_RESET			0x0878
+#define USB3PHY_QSERDES_RXB_RX_TERM_BW				0x087C
+#define USB3PHY_QSERDES_RXB_RX_RCVR_IQ_EN			0x0880
+#define USB3PHY_QSERDES_RXB_RX_IDAC_I_DC_OFFSETS		0x0884
+#define USB3PHY_QSERDES_RXB_RX_IDAC_IBAR_DC_OFFSETS		0x0888
+#define USB3PHY_QSERDES_RXB_RX_IDAC_Q_DC_OFFSETS		0x088C
+#define USB3PHY_QSERDES_RXB_RX_IDAC_QBAR_DC_OFFSETS		0x0890
+#define USB3PHY_QSERDES_RXB_RX_IDAC_A_DC_OFFSETS		0x0894
+#define USB3PHY_QSERDES_RXB_RX_IDAC_ABAR_DC_OFFSETS		0x0898
+#define USB3PHY_QSERDES_RXB_RX_IDAC_EN				0x089C
+#define USB3PHY_QSERDES_RXB_RX_IDAC_ENABLES			0x08A0
+#define USB3PHY_QSERDES_RXB_RX_IDAC_SIGN			0x08A4
+#define USB3PHY_QSERDES_RXB_RX_HIGHZ_HIGHRATE			0x08A8
+#define USB3PHY_QSERDES_RXB_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET	0x08AC
+#define USB3PHY_QSERDES_RXB_DFE_1				0x08B0
+#define USB3PHY_QSERDES_RXB_DFE_2				0x08B4
+#define USB3PHY_QSERDES_RXB_DFE_3				0x08B8
+#define USB3PHY_QSERDES_RXB_VGA_CAL_CNTRL1			0x08BC
+#define USB3PHY_QSERDES_RXB_VGA_CAL_CNTRL2			0x08C0
+#define USB3PHY_QSERDES_RXB_GM_CAL				0x08C4
+#define USB3PHY_QSERDES_RXB_RX_EQ_GAIN2_LSB			0x08C8
+#define USB3PHY_QSERDES_RXB_RX_EQ_GAIN2_MSB			0x08CC
+#define USB3PHY_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL1		0x08D0
+#define USB3PHY_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL2		0x08D4
+#define USB3PHY_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL3		0x08D8
+#define USB3PHY_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL4		0x08DC
+#define USB3PHY_QSERDES_RXB_RX_IDAC_TSETTLE_LOW			0x08E0
+#define USB3PHY_QSERDES_RXB_RX_IDAC_TSETTLE_HIGH		0x08E4
+#define USB3PHY_QSERDES_RXB_RX_IDAC_MEASURE_TIME		0x08E8
+#define USB3PHY_QSERDES_RXB_RX_IDAC_ACCUMULATOR			0x08EC
+#define USB3PHY_QSERDES_RXB_RX_EQ_OFFSET_LSB			0x08F0
+#define USB3PHY_QSERDES_RXB_RX_EQ_OFFSET_MSB			0x08F4
+#define USB3PHY_QSERDES_RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1		0x08F8
+#define USB3PHY_QSERDES_RXB_RX_OFFSET_ADAPTOR_CNTRL2		0x08FC
+#define USB3PHY_QSERDES_RXB_SIGDET_ENABLES			0x0900
+#define USB3PHY_QSERDES_RXB_SIGDET_CNTRL			0x0904
+#define USB3PHY_QSERDES_RXB_SIGDET_LVL				0x0908
+#define USB3PHY_QSERDES_RXB_SIGDET_DEGLITCH_CNTRL		0x090C
+#define USB3PHY_QSERDES_RXB_RX_BAND				0x0910
+#define USB3PHY_QSERDES_RXB_CDR_FREEZE_UP_DN			0x0914
+#define USB3PHY_QSERDES_RXB_CDR_RESET_OVERRIDE			0x0918
+#define USB3PHY_QSERDES_RXB_RX_INTERFACE_MODE			0x091C
+#define USB3PHY_QSERDES_RXB_JITTER_GEN_MODE			0x0920
+#define USB3PHY_QSERDES_RXB_BUJ_AMP				0x0924
+#define USB3PHY_QSERDES_RXB_SJ_AMP1				0x0928
+#define USB3PHY_QSERDES_RXB_SJ_AMP2				0x092C
+#define USB3PHY_QSERDES_RXB_SJ_PER1				0x0930
+#define USB3PHY_QSERDES_RXB_SJ_PER2				0x0934
+#define USB3PHY_QSERDES_RXB_BUJ_STEP_FREQ1			0x0938
+#define USB3PHY_QSERDES_RXB_BUJ_STEP_FREQ2			0x093C
+#define USB3PHY_QSERDES_RXB_PPM_OFFSET1				0x0940
+#define USB3PHY_QSERDES_RXB_PPM_OFFSET2				0x0944
+#define USB3PHY_QSERDES_RXB_SIGN_PPM_PERIOD1			0x0948
+#define USB3PHY_QSERDES_RXB_SIGN_PPM_PERIOD2			0x094C
+#define USB3PHY_QSERDES_RXB_RX_PWM_ENABLE_AND_DATA		0x0950
+#define USB3PHY_QSERDES_RXB_RX_PWM_GEAR1_TIMEOUT_COUNT		0x0954
+#define USB3PHY_QSERDES_RXB_RX_PWM_GEAR2_TIMEOUT_COUNT		0x0958
+#define USB3PHY_QSERDES_RXB_RX_PWM_GEAR3_TIMEOUT_COUNT		0x095C
+#define USB3PHY_QSERDES_RXB_RX_PWM_GEAR4_TIMEOUT_COUNT		0x0960
+#define USB3PHY_QSERDES_RXB_RX_MODE_00				0x0964
+#define USB3PHY_QSERDES_RXB_RX_MODE_01				0x0968
+#define USB3PHY_QSERDES_RXB_RX_MODE_10				0x096C
+#define USB3PHY_QSERDES_RXB_ALOG_OBSV_BUS_CTRL_1		0x0970
+#define USB3PHY_QSERDES_RXB_PI_CTRL1				0x0974
+#define USB3PHY_QSERDES_RXB_PI_CTRL2				0x0978
+#define USB3PHY_QSERDES_RXB_PI_QUAD				0x097C
+#define USB3PHY_QSERDES_RXB_IDATA1				0x0980
+#define USB3PHY_QSERDES_RXB_IDATA2				0x0984
+#define USB3PHY_QSERDES_RXB_AUX_DATA1				0x0988
+#define USB3PHY_QSERDES_RXB_AUX_DATA2				0x098C
+#define USB3PHY_QSERDES_RXB_AC_JTAG_OUTP			0x0990
+#define USB3PHY_QSERDES_RXB_AC_JTAG_OUTN			0x0994
+#define USB3PHY_QSERDES_RXB_RX_SIGDET				0x0998
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_I			0x099C
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_IBAR			0x09A0
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_Q			0x09A4
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_QBAR			0x09A8
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_A			0x09AC
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_ABAR			0x09B0
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_SM_ON			0x09B4
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_CAL_DONE		0x09B8
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_SIGNERROR		0x09BC
+#define USB3PHY_QSERDES_RXB_READ_EQCODE				0x09C0
+#define USB3PHY_QSERDES_RXB_READ_OFFSETCODE			0x09C4
+#define USB3PHY_QSERDES_RXB_IA_ERROR_COUNTER_LOW		0x09C8
+#define USB3PHY_QSERDES_RXB_IA_ERROR_COUNTER_HIGH		0x09CC
+#define USB3PHY_QSERDES_RXB_VGA_READ_CODE			0x09D0
+#define USB3PHY_QSERDES_RXB_DFE_TAP1_READ_CODE			0x09D4
+#define USB3PHY_QSERDES_RXB_DFE_TAP2_READ_CODE			0x09D8
+#define USB3PHY_QSERDES_RXB_ALOG_OBSV_BUS_STATUS_1		0x09DC
+#define USB3PHY_PCS_MISC_TYPEC_CTRL				0x0A00
+#define USB3PHY_PCS_MISC_TYPEC_STATUS				0x0A04
+#define USB3PHY_PCS_MISC_DEBUG_BUS_BYTE0_INDEX			0x0A08
+#define USB3PHY_PCS_MISC_DEBUG_BUS_BYTE1_INDEX			0x0A0C
+#define USB3PHY_PCS_MISC_DEBUG_BUS_BYTE2_INDEX			0x0A10
+#define USB3PHY_PCS_MISC_DEBUG_BUS_BYTE3_INDEX			0x0A14
+#define USB3PHY_PCS_MISC_PLACEHOLDER_STATUS			0x0A18
+#define USB3PHY_PCS_MISC_DEBUG_BUS_0_STATUS			0x0A1C
+#define USB3PHY_PCS_MISC_DEBUG_BUS_1_STATUS			0x0A20
+#define USB3PHY_PCS_MISC_DEBUG_BUS_2_STATUS			0x0A24
+#define USB3PHY_PCS_MISC_DEBUG_BUS_3_STATUS			0x0A28
+#define USB3PHY_PCS_MISC_BIST_CTRL				0x0A2C
+#define USB3PHY_PCS_SW_RESET					0x0C00
+#define USB3PHY_PCS_POWER_DOWN_CONTROL				0x0C04
+#define USB3PHY_PCS_START_CONTROL				0x0C08
+#define USB3PHY_PCS_TXMGN_V0					0x0C0C
+#define USB3PHY_PCS_TXMGN_V1					0x0C10
+#define USB3PHY_PCS_TXMGN_V2					0x0C14
+#define USB3PHY_PCS_TXMGN_V3					0x0C18
+#define USB3PHY_PCS_TXMGN_V4					0x0C1C
+#define USB3PHY_PCS_TXMGN_LS					0x0C20
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V0				0x0C24
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V0				0x0C28
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V1				0x0C2C
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V1				0x0C30
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V2				0x0C34
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V2				0x0C38
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V3				0x0C3C
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V3				0x0C40
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V4				0x0C44
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V4				0x0C48
+#define USB3PHY_PCS_TXDEEMPH_M6DB_LS				0x0C4C
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_LS				0x0C50
+#define USB3PHY_PCS_ENDPOINT_REFCLK_DRIVE			0x0C54
+#define USB3PHY_PCS_RX_IDLE_DTCT_CNTRL				0x0C58
+#define USB3PHY_PCS_RATE_SLEW_CNTRL				0x0C5C
+#define USB3PHY_PCS_POWER_STATE_CONFIG1				0x0C60
+#define USB3PHY_PCS_POWER_STATE_CONFIG2				0x0C64
+#define USB3PHY_PCS_POWER_STATE_CONFIG3				0x0C68
+#define USB3PHY_PCS_POWER_STATE_CONFIG4				0x0C6C
+#define USB3PHY_PCS_RCVR_DTCT_DLY_P1U2_L			0x0C70
+#define USB3PHY_PCS_RCVR_DTCT_DLY_P1U2_H			0x0C74
+#define USB3PHY_PCS_RCVR_DTCT_DLY_U3_L				0x0C78
+#define USB3PHY_PCS_RCVR_DTCT_DLY_U3_H				0x0C7C
+#define USB3PHY_PCS_LOCK_DETECT_CONFIG1				0x0C80
+#define USB3PHY_PCS_LOCK_DETECT_CONFIG2				0x0C84
+#define USB3PHY_PCS_LOCK_DETECT_CONFIG3				0x0C88
+#define USB3PHY_PCS_TSYNC_RSYNC_TIME				0x0C8C
+#define USB3PHY_PCS_SIGDET_LOW_2_IDLE_TIME			0x0C90
+#define USB3PHY_PCS_BEACON_2_IDLE_TIME_L			0x0C94
+#define USB3PHY_PCS_BEACON_2_IDLE_TIME_H			0x0C98
+#define USB3PHY_PCS_PWRUP_RESET_DLY_TIME_SYSCLK			0x0C9C
+#define USB3PHY_PCS_PWRUP_RESET_DLY_TIME_AUXCLK			0x0CA0
+#define USB3PHY_PCS_LP_WAKEUP_DLY_TIME_AUXCLK			0x0CA4
+#define USB3PHY_PCS_PLL_LOCK_CHK_DLY_TIME			0x0CA8
+#define USB3PHY_PCS_LFPS_DET_HIGH_COUNT_VAL			0x0CAC
+#define USB3PHY_PCS_LFPS_TX_ECSTART_EQTLOCK			0x0CB0
+#define USB3PHY_PCS_LFPS_TX_END_CNT_P2U3_START			0x0CB4
+#define USB3PHY_PCS_RXEQTRAINING_WAIT_TIME			0x0CB8
+#define USB3PHY_PCS_RXEQTRAINING_RUN_TIME			0x0CBC
+#define USB3PHY_PCS_TXONESZEROS_RUN_LENGTH			0x0CC0
+#define USB3PHY_PCS_FLL_CNTRL1					0x0CC4
+#define USB3PHY_PCS_FLL_CNTRL2					0x0CC8
+#define USB3PHY_PCS_FLL_CNT_VAL_L				0x0CCC
+#define USB3PHY_PCS_FLL_CNT_VAL_H_TOL				0x0CD0
+#define USB3PHY_PCS_FLL_MAN_CODE				0x0CD4
+#define USB3PHY_PCS_AUTONOMOUS_MODE_CTRL			0x0CD8
+#define USB3PHY_PCS_LFPS_RXTERM_IRQ_CLEAR			0x0CDC
+#define USB3PHY_PCS_ARCVR_DTCT_EN_PERIOD			0x0CE0
+#define USB3PHY_PCS_ARCVR_DTCT_CM_DLY				0x0CE4
+#define USB3PHY_PCS_ALFPS_DEGLITCH_VAL				0x0CE8
+#define USB3PHY_PCS_INSIG_SW_CTRL1				0x0CEC
+#define USB3PHY_PCS_INSIG_SW_CTRL2				0x0CF0
+#define USB3PHY_PCS_INSIG_SW_CTRL3				0x0CF4
+#define USB3PHY_PCS_INSIG_MX_CTRL1				0x0CF8
+#define USB3PHY_PCS_INSIG_MX_CTRL2				0x0CFC
+#define USB3PHY_PCS_INSIG_MX_CTRL3				0x0D00
+#define USB3PHY_PCS_OUTSIG_SW_CTRL1				0x0D04
+#define USB3PHY_PCS_OUTSIG_MX_CTRL1				0x0D08
+#define USB3PHY_PCS_CLK_DEBUG_BYPASS_CTRL			0x0D0C
+#define USB3PHY_PCS_TEST_CONTROL				0x0D10
+#define USB3PHY_PCS_TEST_CONTROL2				0x0D14
+#define USB3PHY_PCS_TEST_CONTROL3				0x0D18
+#define USB3PHY_PCS_TEST_CONTROL4				0x0D1C
+#define USB3PHY_PCS_TEST_CONTROL5				0x0D20
+#define USB3PHY_PCS_TEST_CONTROL6				0x0D24
+#define USB3PHY_PCS_TEST_CONTROL7				0x0D28
+#define USB3PHY_PCS_COM_RESET_CONTROL				0x0D2C
+#define USB3PHY_PCS_BIST_CTRL					0x0D30
+#define USB3PHY_PCS_PRBS_POLY0					0x0D34
+#define USB3PHY_PCS_PRBS_POLY1					0x0D38
+#define USB3PHY_PCS_PRBS_SEED0					0x0D3C
+#define USB3PHY_PCS_PRBS_SEED1					0x0D40
+#define USB3PHY_PCS_FIXED_PAT_CTRL				0x0D44
+#define USB3PHY_PCS_FIXED_PAT0					0x0D48
+#define USB3PHY_PCS_FIXED_PAT1					0x0D4C
+#define USB3PHY_PCS_FIXED_PAT2					0x0D50
+#define USB3PHY_PCS_FIXED_PAT3					0x0D54
+#define USB3PHY_PCS_COM_CLK_SWITCH_CTRL				0x0D58
+#define USB3PHY_PCS_ELECIDLE_DLY_SEL				0x0D5C
+#define USB3PHY_PCS_SPARE1					0x0D60
+#define USB3PHY_PCS_BIST_CHK_ERR_CNT_L_STATUS			0x0D64
+#define USB3PHY_PCS_BIST_CHK_ERR_CNT_H_STATUS			0x0D68
+#define USB3PHY_PCS_BIST_CHK_STATUS				0x0D6C
+#define USB3PHY_PCS_LFPS_RXTERM_IRQ_SOURCE_STATUS		0x0D70
+#define USB3PHY_PCS_PCS_STATUS					0x0D74
+#define USB3PHY_PCS_PCS_STATUS2					0x0D78
+#define USB3PHY_PCS_PCS_STATUS3					0x0D7C
+#define USB3PHY_PCS_COM_RESET_STATUS				0x0D80
+#define USB3PHY_PCS_OSC_DTCT_STATUS				0x0D84
+#define USB3PHY_PCS_REVISION_ID0				0x0D88
+#define USB3PHY_PCS_REVISION_ID1				0x0D8C
+#define USB3PHY_PCS_REVISION_ID2				0x0D90
+#define USB3PHY_PCS_REVISION_ID3				0x0D94
+#define USB3PHY_PCS_DEBUG_BUS_0_STATUS				0x0D98
+#define USB3PHY_PCS_DEBUG_BUS_1_STATUS				0x0D9C
+#define USB3PHY_PCS_DEBUG_BUS_2_STATUS				0x0DA0
+#define USB3PHY_PCS_DEBUG_BUS_3_STATUS				0x0DA4
+#define USB3PHY_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB		0x0DA8
+#define USB3PHY_PCS_OSC_DTCT_ACTIONS				0x0DAC
+#define USB3PHY_PCS_SIGDET_CNTRL				0x0DB0
+#define USB3PHY_PCS_IDAC_CAL_CNTRL				0x0DB4
+#define USB3PHY_PCS_CMN_ACK_OUT_SEL				0x0DB8
+#define USB3PHY_PCS_PLL_LOCK_CHK_DLY_TIME_SYSCLK		0x0DBC
+#define USB3PHY_PCS_AUTONOMOUS_MODE_STATUS			0x0DC0
+#define USB3PHY_PCS_ENDPOINT_REFCLK_CNTRL			0x0DC4
+#define USB3PHY_PCS_EPCLK_PRE_PLL_LOCK_DLY_SYSCLK		0x0DC8
+#define USB3PHY_PCS_EPCLK_PRE_PLL_LOCK_DLY_AUXCLK		0x0DCC
+#define USB3PHY_PCS_EPCLK_DLY_COUNT_VAL_L			0x0DD0
+#define USB3PHY_PCS_EPCLK_DLY_COUNT_VAL_H			0x0DD4
+#define USB3PHY_PCS_RX_SIGDET_LVL				0x0DD8
+#define USB3PHY_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB		0x0DDC
+#define USB3PHY_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB		0x0DE0
+#define USB3PHY_PCS_AUTONOMOUS_MODE_CTRL2			0x0DE4
+#define USB3PHY_PCS_RXTERMINATION_DLY_SEL			0x0DE8
+#define USB3PHY_PCS_LFPS_PER_TIMER_VAL				0x0DEC
+#define USB3PHY_PCS_SIGDET_STARTUP_TIMER_VAL			0x0DF0
+#define USB3PHY_PCS_LOCK_DETECT_CONFIG4				0x0DF4
+#define USB3PHY_PCS_RX_SIGDET_DTCT_CNTRL			0x0DF8
+#define USB3PHY_PCS_PCS_STATUS4					0x0DFC
+#define USB3PHY_PCS_PCS_STATUS4_CLEAR				0x0E00
+#define USB3PHY_PCS_DEC_ERROR_COUNT_STATUS			0x0E04
+#define USB3PHY_PCS_COMMA_POS_STATUS				0x0E08
+#define USB3PHY_PCS_REFGEN_REQ_CONFIG1				0x0E0C
+#define USB3PHY_PCS_REFGEN_REQ_CONFIG2				0x0E10
+#define USB3PHY_PCS_REFGEN_REQ_CONFIG3				0x0E14
+
+#endif /* _DT_BINDINGS_PHY_QCOM_11NM_QMP_COMBO_USB_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b4d23b3..59a416d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1291,10 +1291,15 @@ static inline int lpit_read_residency_count_address(u64 *address)
 #endif
 
 #ifdef CONFIG_ACPI_PPTT
+int acpi_pptt_cpu_is_thread(unsigned int cpu);
 int find_acpi_cpu_topology(unsigned int cpu, int level);
 int find_acpi_cpu_topology_package(unsigned int cpu);
 int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
 #else
+static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+	return -EINVAL;
+}
 static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
 {
 	return -EINVAL;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0801ef9..efa15cf 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -435,6 +435,7 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
 extern void bio_put(struct bio *);
 
+extern void bio_clone_crypt_key(struct bio *dst, const struct bio *src);
 extern void __bio_clone_fast(struct bio *, struct bio *);
 extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 3705c6f..89557544 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -466,7 +466,12 @@ struct sock_fprog_kern {
 	struct sock_filter	*filter;
 };
 
+#define BPF_BINARY_HEADER_MAGIC	0x05de0e82
+
 struct bpf_binary_header {
+#ifdef CONFIG_CFI_CLANG
+	u32 magic;
+#endif
 	u32 pages;
 	/* Some arches need word alignment for their instructions */
 	u8 image[] __aligned(4);
@@ -506,7 +511,62 @@ struct sk_filter {
 	struct bpf_prog	*prog;
 };
 
-#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)
+#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
+/*
+ * With JIT, the kernel makes an indirect call to dynamically generated
+ * code. Use bpf_call_func to perform additional validation of the call
+ * target to narrow down the attack surface. Architectures implementing BPF
+ * JIT can override arch_bpf_jit_check_func for arch-specific checking.
+ */
+extern bool arch_bpf_jit_check_func(const struct bpf_prog *prog);
+
+static inline unsigned int __bpf_call_func(const struct bpf_prog *prog,
+					   const void *ctx)
+{
+	/* Call interpreter with CFI checking. */
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline struct bpf_binary_header *
+bpf_jit_binary_hdr(const struct bpf_prog *fp);
+
+static inline unsigned int __nocfi bpf_call_func(const struct bpf_prog *prog,
+						 const void *ctx)
+{
+	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
+
+	if (!IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) && !prog->jited)
+		return __bpf_call_func(prog, ctx);
+
+	/*
+	 * We are about to call dynamically generated code. Check that the
+	 * page has a bpf_binary_header with a valid magic to limit possible
+	 * call targets.
+	 */
+	BUG_ON(hdr->magic != BPF_BINARY_HEADER_MAGIC ||
+		!arch_bpf_jit_check_func(prog));
+
+	/* Call jited function without CFI checking. */
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
+{
+	hdr->magic = BPF_BINARY_HEADER_MAGIC;
+}
+#else
+static inline unsigned int bpf_call_func(const struct bpf_prog *prog,
+					 const void *ctx)
+{
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
+{
+}
+#endif
+
+#define BPF_PROG_RUN(filter, ctx)  bpf_call_func(filter, ctx)
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
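
Existing call sites are unaffected by the macro rewrite: BPF_PROG_RUN() now expands to bpf_call_func(), which runs the interpreter with CFI checking or, for JITed programs, validates the binary header magic and the arch hook before making the indirect call. A minimal, hypothetical caller (not part of this patch) for illustration:

/* Hypothetical caller: nothing changes at the call site itself. */
static unsigned int run_prog(const struct bpf_prog *prog, const void *ctx)
{
	/* Expands to bpf_call_func(prog, ctx) with this patch applied. */
	return BPF_PROG_RUN(prog, ctx);
}
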
 
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 9493d4a..8fde789 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -225,7 +225,7 @@ enum hwmon_power_attributes {
 #define HWMON_P_LABEL			BIT(hwmon_power_label)
 #define HWMON_P_ALARM			BIT(hwmon_power_alarm)
 #define HWMON_P_CAP_ALARM		BIT(hwmon_power_cap_alarm)
-#define HWMON_P_MIN_ALARM		BIT(hwmon_power_max_alarm)
+#define HWMON_P_MIN_ALARM		BIT(hwmon_power_min_alarm)
 #define HWMON_P_MAX_ALARM		BIT(hwmon_power_max_alarm)
 #define HWMON_P_LCRIT_ALARM		BIT(hwmon_power_lcrit_alarm)
 #define HWMON_P_CRIT_ALARM		BIT(hwmon_power_crit_alarm)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index da3a837..c834782 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -3189,16 +3189,16 @@ struct element {
 	u8 id;
 	u8 datalen;
 	u8 data[];
-};
+} __packed;
 
 /* element iteration helpers */
-#define for_each_element(element, _data, _datalen)			\
-	for (element = (void *)(_data);					\
-	     (u8 *)(_data) + (_datalen) - (u8 *)element >=		\
-		sizeof(*element) &&					\
-	     (u8 *)(_data) + (_datalen) - (u8 *)element >=		\
-		sizeof(*element) + element->datalen;			\
-	     element = (void *)(element->data + element->datalen))
+#define for_each_element(_elem, _data, _datalen)			\
+	for (_elem = (const struct element *)(_data);			\
+	     (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >=	\
+		(int)sizeof(*_elem) &&					\
+	     (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >=	\
+		(int)sizeof(*_elem) + _elem->datalen;			\
+	     _elem = (const struct element *)(_elem->data + _elem->datalen))
 
 #define for_each_element_id(element, _id, data, datalen)		\
 	for_each_element(element, data, datalen)			\
@@ -3235,7 +3235,7 @@ struct element {
 static inline bool for_each_element_completed(const struct element *element,
 					      const void *data, size_t datalen)
 {
-	return (u8 *)element == (u8 *)data + datalen;
+	return (const u8 *)element == (const u8 *)data + datalen;
 }
 
 #endif /* LINUX_IEEE80211_H */
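
The iterator rewrite const-qualifies the element pointer and casts sizeof() to int, so a negative remaining length fails the bounds check instead of being promoted to a large unsigned value. A hypothetical sketch of walking information elements with these helpers (the function and buffer names are illustrative):

#include <linux/ieee80211.h>

/* Hypothetical example: walk the IEs in a buffer and detect truncation. */
static void dump_elements(const u8 *ies, size_t ies_len)
{
	const struct element *elem;

	for_each_element(elem, ies, ies_len)
		pr_debug("IE id=%u len=%u\n", elem->id, elem->datalen);

	/* True only when the last element ended exactly at the buffer end. */
	if (!for_each_element_completed(elem, ies, ies_len))
		pr_warn("trailing garbage or truncated element\n");
}
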
diff --git a/include/linux/init.h b/include/linux/init.h
index 847e853..a223531 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -314,6 +314,8 @@ void __init parse_early_options(char *cmdline);
 /* Data marked not to be saved by software suspend */
 #define __nosavedata __section(.data..nosave)
 
+#define __rticdata  __attribute__((section(".bss.rtic")))
+
 #ifdef MODULE
 #define __exit_p(x) x
 #else
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 93efd91..83768b3 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -313,6 +313,7 @@ struct mhi_controller {
 	enum mhi_dev_state dev_state;
 	enum mhi_dev_state saved_dev_state;
 	bool wake_set;
+	bool ignore_override;
 	atomic_t dev_wake;
 	atomic_t alloc_size;
 	atomic_t pending_pkts;
@@ -329,7 +330,6 @@ struct mhi_controller {
 	/* worker for different state transitions */
 	struct work_struct st_worker;
 	struct work_struct fw_worker;
-	struct work_struct syserr_worker;
 	struct work_struct low_priority_worker;
 	wait_queue_head_t state_event;
 
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 45d34c8..92ea284c 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -38,6 +38,12 @@ struct oom_control {
 	 */
 	const int order;
 
+	/*
+	 * Only kill positive adj tasks. Used to behave more like Android's
+	 * lowmemorykiller.
+	 */
+	const bool only_positive_adj;
+
 	/* Used by oom implementation, do not set */
 	unsigned long totalpages;
 	struct task_struct *chosen;
@@ -99,7 +105,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm);
 
 extern unsigned long oom_badness(struct task_struct *p,
 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
-		unsigned long totalpages);
+		unsigned long totalpages, bool only_positive_adj);
 
 extern bool out_of_memory(struct oom_control *oc);
 
@@ -117,14 +123,18 @@ extern void dump_tasks(struct mem_cgroup *memcg,
 		       const nodemask_t *nodemask);
 
 #ifdef CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER
-extern bool should_ulmk_retry(void);
+extern bool should_ulmk_retry(gfp_t gfp);
 extern void ulmk_update_last_kill(void);
+extern void ulmk_watchdog_fn(struct timer_list *t);
+extern void ulmk_watchdog_pet(struct timer_list *t);
 #else
-static inline bool should_ulmk_retry(void)
+static inline bool should_ulmk_retry(gfp_t gfp)
 {
 	return false;
 }
 static inline void ulmk_update_last_kill(void) {}
+static inline void ulmk_watchdog_fn(struct timer_list *t) {}
+static inline void ulmk_watchdog_pet(struct timer_list *t) {}
 #endif
 
 /* sysctls */
@@ -135,5 +145,6 @@ extern int sysctl_reap_mem_on_sigkill;
 
 /* calls for LMK reaper */
 extern void add_to_oom_reaper(struct task_struct *p);
+extern void check_panic_on_foreground_kill(struct task_struct *p);
 #define ULMK_MAGIC "lmkd"
 #endif /* _INCLUDE_LINUX_OOM_H */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 1111423..8188789 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -343,6 +343,7 @@ enum power_supply_property {
 	POWER_SUPPLY_PROP_ADAPTER_CC_MODE,
 	POWER_SUPPLY_PROP_SKIN_HEALTH,
 	POWER_SUPPLY_PROP_AICL_DONE,
+	POWER_SUPPLY_PROP_VOLTAGE_STEP,
 	/* Charge pump properties */
 	POWER_SUPPLY_PROP_CP_STATUS1,
 	POWER_SUPPLY_PROP_CP_STATUS2,
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 8096c4e..cb7cde3 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -24,6 +24,7 @@ void psi_memstall_leave(unsigned long *flags);
 int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
 
 void psi_emergency_trigger(void);
+bool psi_is_trigger_active(void);
 
 #ifdef CONFIG_CGROUPS
 int psi_cgroup_alloc(struct cgroup *cgrp);
@@ -46,6 +47,10 @@ static inline void psi_memstall_enter(unsigned long *flags) {}
 static inline void psi_memstall_leave(unsigned long *flags) {}
 
 static inline void psi_emergency_trigger(void){}
+static inline bool psi_is_trigger_active(void)
+{
+	return false;
+}
 
 #ifdef CONFIG_CGROUPS
 static inline int psi_cgroup_alloc(struct cgroup *cgrp)
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index eec824f..538ec2b 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/kref.h>
 #include <linux/wait.h>
+#include <linux/timer.h>
 
 #ifdef CONFIG_PSI
 
@@ -126,6 +127,7 @@ struct psi_trigger {
 
 	/* Task that created the trigger */
 	char comm[TASK_COMM_LEN];
+	struct timer_list wdog_timer;
 };
 
 struct psi_group {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 09fc090..7d4fea4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1971,7 +1971,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 # define vcpu_is_preempted(cpu)	false
 #endif
 
-extern long msm_sched_setaffinity(pid_t pid, struct cpumask *new_mask);
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 0d10b7c..e9d4e38 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -330,6 +330,8 @@ enum {
 
 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
 {
+	if (current->mm != mm)
+		return;
 	if (likely(!(atomic_read(&mm->membarrier_state) &
 		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
 		return;
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 3e697fe..de7a412 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -164,7 +164,6 @@ struct usb_phy {
 			enum usb_device_speed speed);
 	int	(*notify_disconnect)(struct usb_phy *x,
 			enum usb_device_speed speed);
-	int	(*link_training)(struct usb_phy *x, bool start);
 
 	/*
 	 * Charger detection method can be implemented if you need to
@@ -354,24 +353,6 @@ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
 }
 
 static inline int
-usb_phy_start_link_training(struct usb_phy *x)
-{
-	if (x && x->link_training)
-		return x->link_training(x, true);
-	else
-		return 0;
-}
-
-static inline int
-usb_phy_stop_link_training(struct usb_phy *x)
-{
-	if (x && x->link_training)
-		return x->link_training(x, false);
-	else
-		return 0;
-}
-
-static inline int
 usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed)
 {
 	if (x && x->notify_disconnect)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e2a5895..1495cd7 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5266,22 +5266,20 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
  * @bssid: transmitter BSSID
  * @max_bssid: max BSSID indicator, taken from Multiple BSSID element
  * @mbssid_index: BSSID index, taken from Multiple BSSID index element
- * @new_bssid_addr: address of the resulting BSSID
+ * @new_bssid: calculated nontransmitted BSSID
  */
 static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
-					  u8 mbssid_index, u8 *new_bssid_addr)
+					  u8 mbssid_index, u8 *new_bssid)
 {
-	u64 bssid_tmp, new_bssid;
-	u64 lsb_n;
+	u64 bssid_u64 = ether_addr_to_u64(bssid);
+	u64 mask = GENMASK_ULL(max_bssid - 1, 0);
+	u64 new_bssid_u64;
 
-	bssid_tmp = ether_addr_to_u64(bssid);
+	new_bssid_u64 = bssid_u64 & ~mask;
 
-	lsb_n = bssid_tmp & ((1 << max_bssid) - 1);
-	new_bssid = bssid_tmp;
-	new_bssid &= ~((1 << max_bssid) - 1);
-	new_bssid |= (lsb_n + mbssid_index) % (1 << max_bssid);
+	new_bssid_u64 |= ((bssid_u64 & mask) + mbssid_index) & mask;
 
-	u64_to_ether_addr(new_bssid, new_bssid_addr);
+	u64_to_ether_addr(new_bssid_u64, new_bssid);
 }
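
The rewritten helper keeps the arithmetic in 64 bits (GENMASK_ULL rather than the old 32-bit 1 << max_bssid) and handles index wrap-around by masking instead of a modulo. A worked example with illustrative values:

/* Worked example (illustrative values):
 *   transmitted BSSID = 00:11:22:33:44:4d, max_bssid = 4, mbssid_index = 5
 *   mask              = GENMASK_ULL(3, 0) = 0xf
 *   low bits          = (0xd + 5) & 0xf   = 0x2
 *   result            = 00:11:22:33:44:42
 */
static void example_gen_new_bssid(void)
{
	static const u8 tx_bssid[ETH_ALEN] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x4d
	};
	u8 new_bssid[ETH_ALEN];

	cfg80211_gen_new_bssid(tx_bssid, 4, 5, new_bssid);
	/* new_bssid is now 00:11:22:33:44:42 */
}
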
 
 /**
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 2b7e227..91f4033 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 struct scsi_eh_save {
 	/* saved state */
 	int result;
+	unsigned int resid_len;
 	int eh_eflags;
 	enum dma_data_direction data_direction;
 	unsigned underflow;
diff --git a/include/soc/qcom/rmnet_ctl.h b/include/soc/qcom/rmnet_ctl.h
index 0080560..94177ec 100644
--- a/include/soc/qcom/rmnet_ctl.h
+++ b/include/soc/qcom/rmnet_ctl.h
@@ -10,6 +10,22 @@
 
 #include <linux/skbuff.h>
 
+enum rmnet_ctl_log_lvl {
+	RMNET_CTL_LOG_CRIT,
+	RMNET_CTL_LOG_ERR,
+	RMNET_CTL_LOG_INFO,
+	RMNET_CTL_LOG_DEBUG,
+};
+
+#define rmnet_ctl_log_err(msg, rc, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_ERR, msg, rc, data, len)
+
+#define rmnet_ctl_log_info(msg, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_INFO, msg, 0, data, len)
+
+#define rmnet_ctl_log_debug(msg, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_DEBUG, msg, 0, data, len)
+
 struct rmnet_ctl_client_hooks {
 	void (*ctl_dl_client_hook)(struct sk_buff *skb);
 };
@@ -19,6 +35,8 @@ struct rmnet_ctl_client_hooks {
 void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook);
 int rmnet_ctl_unregister_client(void *handle);
 int rmnet_ctl_send_client(void *handle, struct sk_buff *skb);
+void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
+		   int rc, const void *data, unsigned int len);
 
 #else
 
@@ -38,6 +56,11 @@ static inline int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
 	return -EINVAL;
 }
 
+static inline void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
+				 int rc, const void *data, unsigned int len)
+{
+}
+
 #endif /* CONFIG_RMNET_CTL */
 
 #endif /* _RMNET_CTL_H_ */
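
The new logging helpers all funnel into rmnet_ctl_log(), which compiles to a no-op stub when CONFIG_RMNET_CTL is disabled. A hypothetical client-side use (the tag string and surrounding function are illustrative):

/* Hypothetical client: record a failed control-channel transmit. */
static void log_ctl_tx_failure(struct sk_buff *skb, int rc)
{
	rmnet_ctl_log_err("CTXF", rc, skb->data, skb->len);
}
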
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 21f1498..d183de5 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -358,6 +358,8 @@ struct device;
 #define SND_SOC_DAPM_WILL_PMD   0x80    /* called at start of sequence */
 #define SND_SOC_DAPM_PRE_POST_PMD \
 				(SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
+#define SND_SOC_DAPM_PRE_POST_PMU \
+				(SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
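
The new mask mirrors the existing SND_SOC_DAPM_PRE_POST_PMD shorthand so a widget can request callbacks on both sides of power-up with one flag. An illustrative sketch of an event handler wired up with it (the handler body and its registration are hypothetical):

/* Hypothetical widget event callback invoked before and after power-up. */
static int example_supply_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *kcontrol, int event)
{
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* e.g. enable a regulator or ungate a clock first */
		break;
	case SND_SOC_DAPM_POST_PMU:
		/* e.g. wait for the supply to settle */
		break;
	}
	return 0;
}
/* Widget declared with .event = example_supply_event and
 * .event_flags = SND_SOC_DAPM_PRE_POST_PMU.
 */
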
 
 /* convenience event type detection */
 #define SND_SOC_DAPM_EVENT_ON(e)	\
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index b401c4e..eb3f668 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1655,6 +1655,7 @@ TRACE_EVENT(qgroup_update_reserve,
 		__entry->qgid		= qgroup->qgroupid;
 		__entry->cur_reserved	= qgroup->rsv.values[type];
 		__entry->diff		= diff;
+		__entry->type		= type;
 	),
 
 	TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
@@ -1677,6 +1678,7 @@ TRACE_EVENT(qgroup_meta_reserve,
 	TP_fast_assign_btrfs(root->fs_info,
 		__entry->refroot	= root->objectid;
 		__entry->diff		= diff;
+		__entry->type		= type;
 	),
 
 	TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
@@ -1693,7 +1695,6 @@ TRACE_EVENT(qgroup_meta_convert,
 	TP_STRUCT__entry_btrfs(
 		__field(	u64,	refroot			)
 		__field(	s64,	diff			)
-		__field(	int,	type			)
 	),
 
 	TP_fast_assign_btrfs(root->fs_info,
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index b055c26..271bd96 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -87,8 +87,6 @@ enum ipa_qmi_error_type_v01 {
 	IPA_QMI_ERROR_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
 };
 
-#pragma pack(push, 1)
-
 struct ipa_qmi_response_type_v01 {
 	uint16_t result;
 	uint16_t error;
@@ -2673,8 +2671,6 @@ struct ipa_bw_change_ind_msg_v01 {
 	/* Kbps */
 	uint32_t peak_bw_dl;
 }; /* Message */
-
-#pragma pack(pop)
 #define IPA_BW_CHANGE_IND_MSG_V01_MAX_MSG_LEN 14
 
 /*Service Message Definition*/
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 4416ecc..2372f06 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -210,6 +210,7 @@
 #define IPA_FLT_L2TP_INNER_IP_TYPE	(1ul << 25)
 #define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
 #define IPA_FLT_IS_PURE_ACK		(1ul << 27)
+#define IPA_FLT_VLAN_ID			(1ul << 28)
 
 /**
  * maximal number of NAT PDNs in the PDN config table
@@ -815,6 +816,7 @@ enum ipa_hw_type {
  * @u.v6.src_addr_mask: src address mask
  * @u.v6.dst_addr: dst address val
  * @u.v6.dst_addr_mask: dst address mask
+ * @vlan_id: vlan id value
  */
 struct ipa_rule_attrib {
 	uint32_t attrib_mask;
@@ -855,6 +857,7 @@ struct ipa_rule_attrib {
 			uint32_t dst_addr_mask[4];
 		} v6;
 	} u;
+	uint16_t vlan_id;
 };
 
 /*! @brief The maximum number of Mask Equal 32 Eqns */
diff --git a/include/uapi/linux/msm_npu.h b/include/uapi/linux/msm_npu.h
index 9309567..bd68c53 100644
--- a/include/uapi/linux/msm_npu.h
+++ b/include/uapi/linux/msm_npu.h
@@ -78,6 +78,8 @@
 #define MSM_NPU_PROP_ID_PERF_MODE_MAX (MSM_NPU_PROP_ID_START + 2)
 #define MSM_NPU_PROP_ID_DRV_VERSION (MSM_NPU_PROP_ID_START + 3)
 #define MSM_NPU_PROP_ID_HARDWARE_VERSION (MSM_NPU_PROP_ID_START + 4)
+#define MSM_NPU_PROP_ID_IPC_QUEUE_INFO (MSM_NPU_PROP_ID_START + 5)
+#define MSM_NPU_PROP_ID_DRV_FEATURE (MSM_NPU_PROP_ID_START + 6)
 
 #define MSM_NPU_FW_PROP_ID_START 0x1000
 #define MSM_NPU_PROP_ID_DCVS_MODE (MSM_NPU_FW_PROP_ID_START + 0)
@@ -86,6 +88,9 @@
 #define MSM_NPU_PROP_ID_HW_VERSION (MSM_NPU_FW_PROP_ID_START + 3)
 #define MSM_NPU_PROP_ID_FW_VERSION (MSM_NPU_FW_PROP_ID_START + 4)
 
+/* features supported by driver */
+#define MSM_NPU_FEATURE_MULTI_EXECUTE  0x1
+#define MSM_NPU_FEATURE_ASYNC_EXECUTE  0x2
 
 #define PROP_PARAM_MAX_SIZE 8
 
diff --git a/include/uapi/scsi/ufs/ufs.h b/include/uapi/scsi/ufs/ufs.h
index 8f906c9..22a0a7e 100644
--- a/include/uapi/scsi/ufs/ufs.h
+++ b/include/uapi/scsi/ufs/ufs.h
@@ -17,6 +17,9 @@ enum flag_idn {
 	QUERY_FLAG_IDN_BUSY_RTC                         = 0x09,
 	QUERY_FLAG_IDN_RESERVED3                        = 0x0A,
 	QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE    = 0x0B,
+	QUERY_FLAG_IDN_WB_EN                            = 0x0E,
+	QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN                 = 0x0F,
+	QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8     = 0x10,
 };
 
 /* Attribute idn for Query requests */
@@ -45,6 +48,10 @@ enum attr_idn {
 	QUERY_ATTR_IDN_PSA_STATE                = 0x15,
 	QUERY_ATTR_IDN_PSA_DATA_SIZE            = 0x16,
 	QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME	= 0x17,
+	QUERY_ATTR_IDN_WB_FLUSH_STATUS	        = 0x1C,
+	QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE       = 0x1D,
+	QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST    = 0x1E,
+	QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE        = 0x1F,
 };
 
 #define QUERY_ATTR_IDN_BOOT_LU_EN_MAX	0x02
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 36be400..afee05e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -621,6 +621,14 @@ static void bpf_jit_uncharge_modmem(u32 pages)
 	atomic_long_sub(pages, &bpf_jit_current);
 }
 
+#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
+bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog)
+{
+	return true;
+}
+EXPORT_SYMBOL(arch_bpf_jit_check_func);
+#endif
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
@@ -647,6 +655,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	/* Fill space with illegal/arch-dep instructions. */
 	bpf_fill_ill_insns(hdr, size);
 
+	bpf_jit_set_header_magic(hdr);
 	hdr->pages = pages;
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
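
The weak default accepts any program, so the header magic check alone gates the indirect call until an architecture provides something stricter. Purely as an illustration (not taken from this patch), an override could verify that the program's entry point lies inside the allocation described by its own bpf_binary_header:

/* Illustrative arch override: accept only programs whose bpf_func points
 * into the executable image that follows their bpf_binary_header.
 */
bool arch_bpf_jit_check_func(const struct bpf_prog *prog)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)prog->bpf_func;
	unsigned long start = (unsigned long)hdr->image;
	unsigned long end = (unsigned long)hdr +
			    ((unsigned long)hdr->pages << PAGE_SHIFT);

	return addr >= start && addr < end;
}
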
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index fc482c8..57fb4dc 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -3,6 +3,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/binfmts.h>
+#include <linux/elfcore.h>
 
 Elf_Half __weak elf_core_extra_phdrs(void)
 {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6d7d708..18d8f2a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6986,7 +6986,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 static int __perf_pmu_output_stop(void *info)
 {
 	struct perf_event *event = info;
-	struct pmu *pmu = event->pmu;
+	struct pmu *pmu = event->ctx->pmu;
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 	struct remote_output ro = {
 		.rb	= event->rb,
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index d6b5618..bf3f2d3 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -426,7 +426,7 @@ static int hw_breakpoint_parse(struct perf_event *bp,
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
-	struct arch_hw_breakpoint hw;
+	struct arch_hw_breakpoint hw = { };
 	int err;
 
 	err = reserve_bp_slot(bp);
@@ -474,7 +474,7 @@ int
 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
 			        bool check)
 {
-	struct arch_hw_breakpoint hw;
+	struct arch_hw_breakpoint hw = { };
 	int err;
 
 	err = hw_breakpoint_parse(bp, attr, &hw);
diff --git a/kernel/fork.c b/kernel/fork.c
index 6a5d06d..55ec95a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2670,7 +2670,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
 	struct ctl_table t;
 	int ret;
 	int threads = max_threads;
-	int min = MIN_THREADS;
+	int min = 1;
 	int max = MAX_THREADS;
 
 	t = *table;
@@ -2682,7 +2682,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
 	if (ret || !write)
 		return ret;
 
-	set_max_threads(threads);
+	max_threads = threads;
 
 	return 0;
 }
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 5a0cf5f..82104d3 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -271,7 +271,7 @@ pv_wait_early(struct pv_node *prev, int loop)
 	if ((loop & PV_PREV_CHECK_MASK) != 0)
 		return false;
 
-	return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
+	return READ_ONCE(prev->state) != vcpu_running;
 }
 
 /*
diff --git a/kernel/panic.c b/kernel/panic.c
index 7241892..9bc9c23 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -150,6 +150,7 @@ void panic(const char *fmt, ...)
 	 * after setting panic_cpu) from invoking panic() again.
 	 */
 	local_irq_disable();
+	preempt_disable_notrace();
 
 	/*
 	 * It's possible to come here directly from a panic-assertion and
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 34c19a1..d62b9f5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9208,10 +9208,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
-	struct max_cpu_capacity *mcc;
-	unsigned long max_capacity;
-	int max_cap_cpu;
-	unsigned long flags;
 
 	capacity *= arch_scale_max_freq_capacity(sd, cpu);
 	capacity >>= SCHED_CAPACITY_SHIFT;
@@ -9219,26 +9215,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	capacity = min(capacity, thermal_cap(cpu));
 	cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
-	mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
-
-	raw_spin_lock_irqsave(&mcc->lock, flags);
-	max_capacity = mcc->val;
-	max_cap_cpu = mcc->cpu;
-
-	if ((max_capacity > capacity && max_cap_cpu == cpu) ||
-	    (max_capacity < capacity)) {
-		mcc->val = capacity;
-		mcc->cpu = cpu;
-#ifdef CONFIG_SCHED_DEBUG
-		raw_spin_unlock_irqrestore(&mcc->lock, flags);
-		printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
-				cpu, capacity);
-		goto skip_unlock;
-#endif
-	}
-	raw_spin_unlock_irqrestore(&mcc->lock, flags);
-
-skip_unlock: __attribute__ ((unused));
 	capacity = scale_rt_capacity(cpu, capacity);
 
 	if (!capacity)
@@ -9770,6 +9746,19 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 			sgs->group_type = group_classify(sg, sgs);
 		}
 
+		/*
+		 * Disallow moving tasks from asym cap sibling CPUs to other
+		 * CPUs (lower capacity) unless the asym cap sibling group has
+		 * no capacity to manage the current load.
+		 */
+		if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
+			sgs->group_no_capacity &&
+			asym_cap_sibling_group_has_capacity(env->dst_cpu,
+						env->sd->imbalance_pct)) {
+			sgs->group_no_capacity = 0;
+			sgs->group_type = group_classify(sg, sgs);
+		}
+
 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
 			sds->busiest_stat = *sgs;
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 76e0eaf..dd27e632 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -235,7 +235,7 @@ static int membarrier_register_private_expedited(int flags)
 	 * groups, which use the same mm. (CLONE_VM but not
 	 * CLONE_THREAD).
 	 */
-	if (atomic_read(&mm->membarrier_state) & state)
+	if ((atomic_read(&mm->membarrier_state) & state) == state)
 		return 0;
 	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
 	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
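
The stricter comparison matters when a process that already registered plain private-expedited membarrier later registers the SYNC_CORE variant: with the old test, any previously set bit caused an early return and the additional state bit was never recorded. A small illustration with made-up bit values (the real bits are the membarrier state flags checked above):

/* Illustration only: generic bits stand in for the membarrier state flags. */
static bool old_check_skips(unsigned int set, unsigned int want)
{
	return set & want;                /* any overlap skipped registration */
}

static bool new_check_skips(unsigned int set, unsigned int want)
{
	return (set & want) == want;      /* skip only when nothing is missing */
}

/* old_check_skips(0x1, 0x3) -> true (bug), new_check_skips(0x1, 0x3) -> false */
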
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 6c208cc..4170afd 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -575,8 +575,12 @@ static u64 update_triggers(struct psi_group *group, u64 now)
 		trace_psi_event(t->state, t->threshold);
 
 		/* Generate an event */
-		if (cmpxchg(&t->event, 0, 1) == 0)
+		if (cmpxchg(&t->event, 0, 1) == 0) {
+			if (!strcmp(t->comm, ULMK_MAGIC))
+				mod_timer(&t->wdog_timer, jiffies +
+					  nsecs_to_jiffies(2 * t->win.size));
 			wake_up_interruptible(&t->event_wait);
+		}
 		t->last_event_time = now;
 	}
 
@@ -588,10 +592,14 @@ static u64 update_triggers(struct psi_group *group, u64 now)
 	return now + group->poll_min_period;
 }
 
+/*
+ * Allows sending more than one event per window.
+ */
 void psi_emergency_trigger(void)
 {
 	struct psi_group *group = &psi_system;
 	struct psi_trigger *t;
+	u64 now;
 
 	if (static_branch_likely(&psi_disabled))
 		return;
@@ -603,19 +611,55 @@ void psi_emergency_trigger(void)
 	if (!mutex_trylock(&group->trigger_lock))
 		return;
 
+	now = sched_clock();
 	list_for_each_entry(t, &group->triggers, node) {
 		if (strcmp(t->comm, ULMK_MAGIC))
 			continue;
 		trace_psi_event(t->state, t->threshold);
 
 		/* Generate an event */
-		if (cmpxchg(&t->event, 0, 1) == 0)
+		if (cmpxchg(&t->event, 0, 1) == 0) {
+			mod_timer(&t->wdog_timer, jiffies + nsecs_to_jiffies(2 * t->win.size));
 			wake_up_interruptible(&t->event_wait);
+		}
+		t->last_event_time = now;
 	}
 	mutex_unlock(&group->trigger_lock);
 }
 
 /*
+ * Return true if any ULMK trigger is still within its event window.
+ */
+bool psi_is_trigger_active(void)
+{
+	struct psi_group *group = &psi_system;
+	struct psi_trigger *t;
+	bool trigger_active = false;
+	u64 now;
+
+	if (static_branch_likely(&psi_disabled))
+		return false;
+
+	/*
+	 * In the unlikely case that OOM was triggered while triggers were
+	 * being added or removed, conservatively report a trigger as active.
+	 */
+	if (!mutex_trylock(&group->trigger_lock))
+		return true;
+
+	now = sched_clock();
+	list_for_each_entry(t, &group->triggers, node) {
+		if (strcmp(t->comm, ULMK_MAGIC))
+			continue;
+
+		if (now <= t->last_event_time + t->win.size)
+			trigger_active = true;
+	}
+	mutex_unlock(&group->trigger_lock);
+	return trigger_active;
+}
+
+/*
  * Schedule polling if it's not already scheduled. It's safe to call even from
  * hotpath because even though kthread_queue_delayed_work takes worker->lock
  * spinlock that spinlock is never contended due to poll_scheduled atomic
@@ -1116,6 +1160,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
 	init_waitqueue_head(&t->event_wait);
 	kref_init(&t->refcount);
 	get_task_comm(t->comm, current);
+	timer_setup(&t->wdog_timer, ulmk_watchdog_fn, TIMER_DEFERRABLE);
 
 	mutex_lock(&group->trigger_lock);
 
@@ -1188,6 +1233,7 @@ static void psi_trigger_destroy(struct kref *ref)
 		}
 	}
 
+	del_timer_sync(&t->wdog_timer);
 	mutex_unlock(&group->trigger_lock);
 
 	/*
@@ -1241,8 +1287,11 @@ __poll_t psi_trigger_poll(void **trigger_ptr,
 
 	poll_wait(file, &t->event_wait, wait);
 
-	if (cmpxchg(&t->event, 1, 0) == 1)
+	if (cmpxchg(&t->event, 1, 0) == 1) {
 		ret |= EPOLLPRI;
+		if (!strcmp(t->comm, ULMK_MAGIC))
+			ulmk_watchdog_pet(&t->wdog_timer);
+	}
 
 	kref_put(&t->refcount, psi_trigger_destroy);
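The psi changes above attach a deferrable watchdog timer to each ULMK trigger: it is armed when an event is delivered, petted when userspace consumes the event via poll, and torn down with the trigger; ulmk_watchdog_fn() and ulmk_watchdog_pet() themselves live in mm/oom_kill.c further down. A condensed sketch of that lifetime, using a simplified stand-in struct (demo_* names are hypothetical):

	#include <linux/types.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct demo_trigger {			/* stands in for struct psi_trigger */
		struct timer_list wdog_timer;
		u64 win_size_ns;		/* trigger window, in nanoseconds */
	};

	static void demo_wdog_fn(struct timer_list *t)
	{
		/* Fires only if userspace never consumed the generated event. */
	}

	static void demo_trigger_create(struct demo_trigger *t)
	{
		timer_setup(&t->wdog_timer, demo_wdog_fn, TIMER_DEFERRABLE);
	}

	static void demo_event_sent(struct demo_trigger *t)
	{
		/* Give userspace two full windows to react before the watchdog fires. */
		mod_timer(&t->wdog_timer, jiffies + nsecs_to_jiffies(2 * t->win_size_ns));
	}

	static void demo_event_consumed(struct demo_trigger *t)
	{
		del_timer_sync(&t->wdog_timer);	/* same idea as ulmk_watchdog_pet() */
	}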
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 652424f..47f9add 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2699,6 +2699,35 @@ static inline int asym_cap_siblings(int cpu1, int cpu2)
 		cpumask_test_cpu(cpu2, &asym_cap_sibling_cpus));
 }
 
+static inline bool asym_cap_sibling_group_has_capacity(int dst_cpu, int margin)
+{
+	int sib1, sib2;
+	int nr_running;
+	unsigned long total_util, total_capacity;
+
+	if (cpumask_empty(&asym_cap_sibling_cpus) ||
+			cpumask_test_cpu(dst_cpu, &asym_cap_sibling_cpus))
+		return false;
+
+	sib1 = cpumask_first(&asym_cap_sibling_cpus);
+	sib2 = cpumask_last(&asym_cap_sibling_cpus);
+
+	if (!cpu_active(sib1) || cpu_isolated(sib1) ||
+		!cpu_active(sib2) || cpu_isolated(sib2))
+		return false;
+
+	nr_running = cpu_rq(sib1)->cfs.h_nr_running +
+			cpu_rq(sib2)->cfs.h_nr_running;
+
+	if (nr_running <= 2)
+		return true;
+
+	total_capacity = capacity_of(sib1) + capacity_of(sib2);
+	total_util = cpu_util(sib1) + cpu_util(sib2);
+
+	return ((total_capacity * 100) > (total_util * margin));
+}
+
 static inline int cpu_max_possible_capacity(int cpu)
 {
 	return cpu_rq(cpu)->cluster->max_possible_capacity;
@@ -2914,7 +2943,11 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
 
 static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
 {
-	return is_min_capacity_cpu(cluster_first_cpu(cluster));
+	int cpu = cluster_first_cpu(cluster);
+
+	if (cpu >= num_possible_cpus())
+		return false;
+	return is_min_capacity_cpu(cpu);
 }
 
 #else	/* CONFIG_SCHED_WALT */
@@ -2978,6 +3011,11 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
 
 static inline int asym_cap_siblings(int cpu1, int cpu2) { return 0; }
 
+static inline bool asym_cap_sibling_group_has_capacity(int dst_cpu, int margin)
+{
+	return false;
+}
+
 static inline void set_preferred_cluster(struct related_thread_group *grp) { }
 
 static inline bool task_in_related_thread_group(struct task_struct *p)
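asym_cap_sibling_group_has_capacity() above treats the two sibling CPUs as one pool and compares pooled utilization against pooled capacity scaled by the load-balance imbalance percentage. A small worked sketch of just that arithmetic, with made-up numbers:

	/* True while pooled utilization stays below capacity * 100 / margin. */
	static bool demo_pool_has_capacity(unsigned long total_capacity,
					   unsigned long total_util,
					   unsigned int margin)
	{
		return (total_capacity * 100) > (total_util * margin);
	}

	/*
	 * Example: two siblings of capacity 512 each and imbalance_pct = 125
	 * give total_capacity = 1024 and a threshold of 1024 * 100 / 125 ≈ 819,
	 * i.e. the pool is still considered to have spare capacity while the
	 * summed cpu_util() of both CPUs stays below roughly 80% of the summed
	 * capacity (on top of the nr_running <= 2 fast path).
	 */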
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index df60ad6..ffdaf2f 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1976,12 +1976,12 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 
 		sd = *per_cpu_ptr(d.sd, i);
 
-		if ((max_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig >
-				cpu_rq(max_cpu)->cpu_capacity_orig))
+		if ((max_cpu < 0) || (arch_scale_cpu_capacity(NULL, i) >
+				arch_scale_cpu_capacity(NULL, max_cpu)))
 			WRITE_ONCE(d.rd->max_cap_orig_cpu, i);
 
-		if ((min_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig <
-				cpu_rq(min_cpu)->cpu_capacity_orig))
+		if ((min_cpu < 0) || (arch_scale_cpu_capacity(NULL, i) <
+				arch_scale_cpu_capacity(NULL, min_cpu)))
 			WRITE_ONCE(d.rd->min_cap_orig_cpu, i);
 
 		cpu_attach_domain(sd, d.rd, i);
@@ -1992,14 +1992,26 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 		int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
 		int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);
 
-		if ((cpu_rq(i)->cpu_capacity_orig
-				!=  cpu_rq(min_cpu)->cpu_capacity_orig) &&
-				(cpu_rq(i)->cpu_capacity_orig
-				!=  cpu_rq(max_cpu)->cpu_capacity_orig)) {
+		if ((arch_scale_cpu_capacity(NULL, i)
+				!=  arch_scale_cpu_capacity(NULL, min_cpu)) &&
+				(arch_scale_cpu_capacity(NULL, i)
+				!=  arch_scale_cpu_capacity(NULL, max_cpu))) {
 			WRITE_ONCE(d.rd->mid_cap_orig_cpu, i);
 			break;
 		}
 	}
+
+	/*
+	 * The max_cpu_capacity reflects the original capacity, which does not
+	 * change dynamically. So update the max cap CPU and its capacity
+	 * here.
+	 */
+	if (d.rd->max_cap_orig_cpu != -1) {
+		d.rd->max_cpu_capacity.cpu = d.rd->max_cap_orig_cpu;
+		d.rd->max_cpu_capacity.val = arch_scale_cpu_capacity(NULL,
+						d.rd->max_cap_orig_cpu);
+	}
+
 	rcu_read_unlock();
 
 	if (has_asym)
diff --git a/kernel/signal.c b/kernel/signal.c
index 4011898..4a321c6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1369,6 +1369,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
 	rcu_read_unlock();
 
 	if (!ret && sig) {
+		check_panic_on_foreground_kill(p);
 		ret = do_send_sig_info(sig, info, p, type);
 		if (capable(CAP_KILL) && sig == SIGKILL) {
 			add_to_oom_reaper(p);
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index a59641f..a836efd3 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -44,34 +44,39 @@ static int bc_shutdown(struct clock_event_device *evt)
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
-	int bc_moved;
 	/*
-	 * We try to cancel the timer first. If the callback is on
-	 * flight on some other cpu then we let it handle it. If we
-	 * were able to cancel the timer nothing can rearm it as we
-	 * own broadcast_lock.
+	 * This is called either from enter/exit idle code or from the
+	 * broadcast handler. In all cases tick_broadcast_lock is held.
 	 *
-	 * However we can also be called from the event handler of
-	 * ce_broadcast_hrtimer itself when it expires. We cannot
-	 * restart the timer because we are in the callback, but we
-	 * can set the expiry time and let the callback return
-	 * HRTIMER_RESTART.
+	 * hrtimer_cancel() cannot be called here neither from the
+	 * broadcast handler nor from the enter/exit idle code. The idle
+	 * code can run into the problem described in bc_shutdown() and the
+	 * broadcast handler cannot wait for itself to complete for obvious
+	 * reasons.
 	 *
-	 * Since we are in the idle loop at this point and because
-	 * hrtimer_{start/cancel} functions call into tracing,
-	 * calls to these functions must be bound within RCU_NONIDLE.
+	 * Each caller tries to arm the hrtimer on its own CPU, but if the
+	 * hrtimer callbback function is currently running, then
+	 * hrtimer_start() cannot move it and the timer stays on the CPU on
+	 * which it is assigned at the moment.
+	 *
+	 * As this can be called from idle code, the hrtimer_start()
+	 * invocation has to be wrapped with RCU_NONIDLE() as
+	 * hrtimer_start() can call into tracing.
 	 */
-	RCU_NONIDLE({
-			bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-			if (bc_moved)
-				hrtimer_start(&bctimer, expires,
-					      HRTIMER_MODE_ABS_PINNED);});
-	if (bc_moved) {
-		/* Bind the "device" to the cpu */
-		bc->bound_on = smp_processor_id();
-	} else if (bc->bound_on == smp_processor_id()) {
-		hrtimer_set_expires(&bctimer, expires);
-	}
+	RCU_NONIDLE( {
+		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+		/*
+		 * The core tick broadcast mode expects bc->bound_on to be set
+		 * correctly to prevent a CPU which has the broadcast hrtimer
+		 * armed from going deep idle.
+		 *
+		 * As tick_broadcast_lock is held, nothing can change the cpu
+		 * base which was just established in hrtimer_start() above. So
+		 * the below access is safe even without holding the hrtimer
+		 * base lock.
+		 */
+		bc->bound_on = bctimer.base->cpu_base->cpu;
+	} );
 	return 0;
 }
 
@@ -97,10 +102,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
 	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-	if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-		if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
-			return HRTIMER_RESTART;
-
 	return HRTIMER_NORESTART;
 }
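Because bc_set_next() can be reached from the idle path, where RCU treats the CPU as quiescent, anything that may hit a tracepoint (hrtimer_start() can) has to be wrapped in RCU_NONIDLE(), as the new comment spells out. A minimal sketch of the wrapper in isolation, with a hypothetical timer:

	#include <linux/rcupdate.h>
	#include <linux/hrtimer.h>

	static struct hrtimer demo_timer;	/* hypothetical, initialised elsewhere */

	static void demo_arm_from_idle(ktime_t expires)
	{
		/*
		 * RCU_NONIDLE() momentarily marks the CPU as non-idle for RCU,
		 * so tracepoints reached from hrtimer_start() are safe to hit.
		 */
		RCU_NONIDLE(
			hrtimer_start(&demo_timer, expires, HRTIMER_MODE_ABS_PINNED)
		);
	}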
 
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index e0d0fd71..5bbb433 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1634,24 +1634,26 @@ void timer_clear_idle(void)
 static int collect_expired_timers(struct timer_base *base,
 				  struct hlist_head *heads)
 {
+	unsigned long now = READ_ONCE(jiffies);
+
 	/*
 	 * NOHZ optimization. After a long idle sleep we need to forward the
 	 * base to current jiffies. Avoid a loop by searching the bitfield for
 	 * the next expiring timer.
 	 */
-	if ((long)(jiffies - base->clk) > 2) {
+	if ((long)(now - base->clk) > 2) {
 		unsigned long next = __next_timer_interrupt(base);
 
 		/*
 		 * If the next timer is ahead of time forward to current
 		 * jiffies, otherwise forward to the next expiry time:
 		 */
-		if (time_after(next, jiffies)) {
+		if (time_after(next, now)) {
 			/*
 			 * The call site will increment base->clk and then
 			 * terminate the expiry loop immediately.
 			 */
-			base->clk = jiffies;
+			base->clk = now;
 			return 0;
 		}
 		base->clk = next;
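The timer hunk reads jiffies once into a local so that the staleness test and the later time_after() comparison see the same instant even if a tick advances jiffies between them. A minimal sketch of the pattern, with hypothetical names:

	#include <linux/jiffies.h>
	#include <linux/compiler.h>

	static void demo_forward_base(unsigned long *base_clk, unsigned long next_expiry)
	{
		unsigned long now = READ_ONCE(jiffies);	/* single snapshot */

		if ((long)(now - *base_clk) <= 2)
			return;				/* base is recent enough */

		/* Both uses below see the same 'now', even across a tick. */
		*base_clk = time_after(next_expiry, now) ? now : next_expiry;
	}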
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 18d782a..b3ce6a9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3558,21 +3558,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 	struct ftrace_hash *hash;
 	struct list_head *mod_head;
 	struct trace_array *tr = ops->private;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	ftrace_ops_init(ops);
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	if (tr && trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter)
-		return -ENOMEM;
+		goto out;
 
-	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
-		kfree(iter);
-		return -ENOMEM;
-	}
+	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+		goto out;
 
 	iter->ops = ops;
 	iter->flags = flag;
@@ -3602,13 +3603,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 
 		if (!iter->hash) {
 			trace_parser_put(&iter->parser);
-			kfree(iter);
-			ret = -ENOMEM;
 			goto out_unlock;
 		}
 	} else
 		iter->hash = hash;
 
+	ret = 0;
+
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
 
@@ -3620,7 +3621,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 			/* Failed */
 			free_ftrace_hash(iter->hash);
 			trace_parser_put(&iter->parser);
-			kfree(iter);
 		}
 	} else
 		file->private_data = iter;
@@ -3628,6 +3628,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
  out_unlock:
 	mutex_unlock(&ops->func_hash->regex_lock);
 
+ out:
+	if (ret) {
+		kfree(iter);
+		if (tr)
+			trace_array_put(tr);
+	}
+
 	return ret;
 }
 
@@ -5025,6 +5032,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 	mutex_unlock(&iter->ops->func_hash->regex_lock);
 	free_ftrace_hash(iter->hash);
+	if (iter->tr)
+		trace_array_put(iter->tr);
 	kfree(iter);
 
 	return 0;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 10b1e4c..3f4fd25 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4160,9 +4160,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
 	if (tracing_disabled)
 		return -ENODEV;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	ret = seq_open(file, &show_traces_seq_ops);
-	if (ret)
+	if (ret) {
+		trace_array_put(tr);
 		return ret;
+	}
 
 	m = file->private_data;
 	m->private = tr;
@@ -4170,6 +4175,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+	return seq_release(inode, file);
+}
+
 static ssize_t
 tracing_write_stub(struct file *filp, const char __user *ubuf,
 		   size_t count, loff_t *ppos)
@@ -4200,8 +4213,8 @@ static const struct file_operations tracing_fops = {
 static const struct file_operations show_traces_fops = {
 	.open		= show_traces_open,
 	.read		= seq_read,
-	.release	= seq_release,
 	.llseek		= seq_lseek,
+	.release	= show_traces_release,
 };
 
 static ssize_t
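Both tracing fixes follow the same shape: take a reference on the trace_array in ->open(), drop it on every error path of ->open(), and drop it again in ->release(), so the instance cannot go away while the file stays open. A condensed sketch of the pairing as it would look inside kernel/trace/trace.c (demo_* names are hypothetical; the real error handling is spread over the hunks above):

	static int demo_open(struct inode *inode, struct file *file)
	{
		struct trace_array *tr = inode->i_private;
		int ret;

		if (trace_array_get(tr) < 0)
			return -ENODEV;

		ret = seq_open(file, &show_traces_seq_ops);
		if (ret)
			trace_array_put(tr);	/* error path gives the ref back */
		return ret;
	}

	static int demo_release(struct inode *inode, struct file *file)
	{
		trace_array_put(inode->i_private);	/* success path drops it here */
		return seq_release(inode, file);
	}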
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 71afb6e..32e814e 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2526,6 +2526,8 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
 		return NULL;
 	}
 
+	alias->var_ref_idx = var_ref->var_ref_idx;
+
 	return alias;
 }
 
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 1e6db9c..8030e24d 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -150,7 +150,7 @@ void trace_hwlat_callback(bool enter)
 		if (enter)
 			nmi_ts_start = time_get();
 		else
-			nmi_total_ts = time_get() - nmi_ts_start;
+			nmi_total_ts += time_get() - nmi_ts_start;
 	}
 
 	if (enter)
@@ -256,6 +256,8 @@ static int get_sample(void)
 		/* Keep a running maximum ever recorded hardware latency */
 		if (sample > tr->max_latency)
 			tr->max_latency = sample;
+		if (outer_sample > tr->max_latency)
+			tr->max_latency = outer_sample;
 	}
 
 out:
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 5939549..9135c29 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -93,9 +93,9 @@
  *       goto errout;
  *   }
  *
- *   pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
+ *   pos = textsearch_find_continuous(conf, &state, example, strlen(example));
  *   if (pos != UINT_MAX)
- *       panic("Oh my god, dancing chickens at \%d\n", pos);
+ *       panic("Oh my god, dancing chickens at %d\n", pos);
  *
  *   textsearch_destroy(conf);
  */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1710826..84de70b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1073,11 +1073,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
 	struct page *page;
 
 	for (i = start_pfn; i < end_pfn; i++) {
-		if (!pfn_valid(i))
+		page = pfn_to_online_page(i);
+		if (!page)
 			return false;
 
-		page = pfn_to_page(i);
-
 		if (page_zone(page) != z)
 			return false;
 
diff --git a/mm/memfd.c b/mm/memfd.c
index a9ece5f..908379a 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -34,11 +34,12 @@ static void memfd_tag_pins(struct address_space *mapping)
 	void __rcu **slot;
 	pgoff_t start;
 	struct page *page;
+	unsigned int tagged = 0;
 
 	lru_add_drain();
 	start = 0;
-	rcu_read_lock();
 
+	xa_lock_irq(&mapping->i_pages);
 	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
 		page = radix_tree_deref_slot(slot);
 		if (!page || radix_tree_exception(page)) {
@@ -47,18 +48,19 @@ static void memfd_tag_pins(struct address_space *mapping)
 				continue;
 			}
 		} else if (page_count(page) - page_mapcount(page) > 1) {
-			xa_lock_irq(&mapping->i_pages);
 			radix_tree_tag_set(&mapping->i_pages, iter.index,
 					   MEMFD_TAG_PINNED);
-			xa_unlock_irq(&mapping->i_pages);
 		}
 
-		if (need_resched()) {
-			slot = radix_tree_iter_resume(slot, &iter);
-			cond_resched_rcu();
-		}
+		if (++tagged % 1024)
+			continue;
+
+		slot = radix_tree_iter_resume(slot, &iter);
+		xa_unlock_irq(&mapping->i_pages);
+		cond_resched();
+		xa_lock_irq(&mapping->i_pages);
 	}
-	rcu_read_unlock();
+	xa_unlock_irq(&mapping->i_pages);
 }
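The memfd change holds the i_pages lock across the whole walk and only yields every 1024 tagged entries, instead of taking and dropping the lock around each tag. A minimal sketch of that batching pattern under an arbitrary spinlock, with hypothetical names (the real code must also call radix_tree_iter_resume() before dropping the lock so the iterator survives):

	#include <linux/spinlock.h>
	#include <linux/sched.h>

	static void demo_batched_walk(spinlock_t *lock, unsigned long nr_items)
	{
		unsigned int done = 0;
		unsigned long i;

		spin_lock_irq(lock);
		for (i = 0; i < nr_items; i++) {
			/* ... process item i while holding the lock ... */

			if (++done % 1024)
				continue;

			/* Every 1024 items: drop the lock and let others run. */
			spin_unlock_irq(lock);
			cond_resched();
			spin_lock_irq(lock);
		}
		spin_unlock_irq(lock);
	}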
 
 /*
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3b2e4a2..4bbc07b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -202,7 +202,6 @@ struct to_kill {
 	struct task_struct *tsk;
 	unsigned long addr;
 	short size_shift;
-	char addr_valid;
 };
 
 /*
@@ -327,22 +326,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
 		}
 	}
 	tk->addr = page_address_in_vma(p, vma);
-	tk->addr_valid = 1;
 	if (is_zone_device_page(p))
 		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
 	else
 		tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
 
 	/*
-	 * In theory we don't have to kill when the page was
-	 * munmaped. But it could be also a mremap. Since that's
-	 * likely very rare kill anyways just out of paranoia, but use
-	 * a SIGKILL because the error is not contained anymore.
+	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
+	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
+	 * "tk->size_shift == 0" effectively checks for no mapping on
+	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
+	 * to a process' address space, it's possible not all N VMAs
+	 * contain mappings for the page, but at least one VMA does.
+	 * Only deliver SIGBUS with payload derived from the VMA that
+	 * has a mapping for the page.
 	 */
-	if (tk->addr == -EFAULT || tk->size_shift == 0) {
+	if (tk->addr == -EFAULT) {
 		pr_info("Memory failure: Unable to find user space address %lx in %s\n",
 			page_to_pfn(p), tsk->comm);
-		tk->addr_valid = 0;
+	} else if (tk->size_shift == 0) {
+		kfree(tk);
+		return;
 	}
 	get_task_struct(tsk);
 	tk->tsk = tsk;
@@ -369,7 +373,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
 			 * make sure the process doesn't catch the
 			 * signal and then access the memory. Just kill it.
 			 */
-			if (fail || tk->addr_valid == 0) {
+			if (fail || tk->addr == -EFAULT) {
 				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
 				       pfn, tk->tsk->comm, tk->tsk->pid);
 				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
@@ -1258,17 +1262,19 @@ int memory_failure(unsigned long pfn, int flags)
 	if (!sysctl_memory_failure_recovery)
 		panic("Memory failure on page %lx", pfn);
 
-	if (!pfn_valid(pfn)) {
+	p = pfn_to_online_page(pfn);
+	if (!p) {
+		if (pfn_valid(pfn)) {
+			pgmap = get_dev_pagemap(pfn, NULL);
+			if (pgmap)
+				return memory_failure_dev_pagemap(pfn, flags,
+								  pgmap);
+		}
 		pr_err("Memory failure: %#lx: memory outside kernel control\n",
 			pfn);
 		return -ENXIO;
 	}
 
-	pgmap = get_dev_pagemap(pfn, NULL);
-	if (pgmap)
-		return memory_failure_dev_pagemap(pfn, flags, pgmap);
-
-	p = pfn_to_page(pfn);
 	if (PageHuge(p))
 		return memory_failure_hugetlb(pfn, flags);
 	if (TestSetPageHWPoison(p)) {
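This hunk, like the hugetlb and page_owner hunks, replaces pfn_valid()/pfn_to_page() with pfn_to_online_page(), which also rejects pfns whose memory section is offline or not yet initialized. A minimal sketch of the idiom, with a hypothetical helper name:

	#include <linux/memory_hotplug.h>
	#include <linux/mm.h>

	static struct page *demo_get_usable_page(unsigned long pfn)
	{
		/* NULL if the pfn is invalid or its section is not online. */
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			return NULL;	/* never touch the struct page in that case */

		return page;
	}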
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5fb4bcc..033b214 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -58,6 +58,9 @@ int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks = 1;
 int sysctl_reap_mem_on_sigkill = 1;
 
+static int panic_on_adj_zero;
+module_param(panic_on_adj_zero, int, 0644);
+
 /*
  * Serializes oom killer invocations (out_of_memory()) from all contexts to
  * prevent from over eager oom killing (e.g. when the oom killer is invoked
@@ -74,8 +77,21 @@ DEFINE_MUTEX(oom_lock);
  */
 
 #ifdef CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER
+
+/* The maximum amount of time to loop in should_ulmk_retry() */
+#define ULMK_TIMEOUT (20 * HZ)
+
+#define ULMK_DBG_POLICY_TRIGGER (BIT(0))
+#define ULMK_DBG_POLICY_WDOG (BIT(1))
+#define ULMK_DBG_POLICY_POSITIVE_ADJ (BIT(2))
+#define ULMK_DBG_POLICY_ALL (BIT(3) - 1)
+static unsigned int ulmk_dbg_policy;
+module_param(ulmk_dbg_policy, uint, 0644);
+
+static atomic64_t ulmk_wdog_expired = ATOMIC64_INIT(0);
 static atomic64_t ulmk_kill_jiffies = ATOMIC64_INIT(INITIAL_JIFFIES);
 static unsigned long psi_emergency_jiffies = INITIAL_JIFFIES;
+/* Serializes should_ulmk_retry() and protects psi_emergency_jiffies */
 static DEFINE_MUTEX(ulmk_retry_lock);
 
 static bool ulmk_kill_possible(void)
@@ -102,50 +118,81 @@ static bool ulmk_kill_possible(void)
 }
 
 /*
- * psi_emergency_jiffies represents the last ULMK emergency event.
- * Give ULMK a 2 second window to handle this event.
- * If ULMK has made some progress since then, send another.
- * Repeat as necessary.
+ * If CONFIG_DEBUG_PANIC_ON_OOM is enabled, attempt to determine *why*
+ * we are in this state.
+ * 1) No events were sent by PSI to userspace
+ * 2) PSI sent an event to userspace, but userspace was not able to
+ * receive the event. Possible causes of this include waiting for a
+ * mutex which is held by a process in direct reclaim. Or the userspace
+ * component has crashed.
+ * 3) Userspace received the event, but decided not to kill anything.
  */
-bool should_ulmk_retry(void)
+bool should_ulmk_retry(gfp_t gfp_mask)
 {
 	unsigned long now, last_kill;
-	bool ret = false;
+	bool ret = true;
+	bool wdog_expired, trigger_active;
 
-	mutex_lock(&ulmk_retry_lock);
+	struct oom_control oc = {
+		.zonelist = node_zonelist(first_memory_node, gfp_mask),
+		.nodemask = NULL,
+		.memcg = NULL,
+		.gfp_mask = gfp_mask,
+		.order = 0,
+		/* Also causes check_panic_on_oom not to panic */
+		.only_positive_adj = true,
+	};
+
+	if (!sysctl_panic_on_oom)
+		return false;
+
+	if (gfp_mask & __GFP_RETRY_MAYFAIL)
+		return false;
+
+	/* Someone else is already checking. */
+	if (!mutex_trylock(&ulmk_retry_lock))
+		return true;
+
 	now = jiffies;
 	last_kill = atomic64_read(&ulmk_kill_jiffies);
-	if (time_before(now, psi_emergency_jiffies + 2 * HZ)) {
-		ret = true;
-		goto out;
-	}
+	wdog_expired = atomic64_read(&ulmk_wdog_expired);
+	trigger_active = psi_is_trigger_active();
 
-	if (time_after_eq(last_kill, psi_emergency_jiffies)) {
+	if (time_after(last_kill, psi_emergency_jiffies)) {
 		psi_emergency_jiffies = now;
+		ret = true;
+	} else if (time_after(now, psi_emergency_jiffies + ULMK_TIMEOUT)) {
+		ret = false;
+	} else if (!trigger_active) {
+		BUG_ON(ulmk_dbg_policy & ULMK_DBG_POLICY_TRIGGER);
 		psi_emergency_trigger();
 		ret = true;
-		goto out;
+	} else if (wdog_expired) {
+		mutex_lock(&oom_lock);
+		ret = out_of_memory(&oc);
+		mutex_unlock(&oom_lock);
+		BUG_ON(!ret && ulmk_dbg_policy & ULMK_DBG_POLICY_POSITIVE_ADJ);
+	} else if (!ulmk_kill_possible()) {
+		BUG_ON(ulmk_dbg_policy & ULMK_DBG_POLICY_POSITIVE_ADJ);
+		ret = false;
 	}
 
-	/*
-	 * We reached here means no kill have had happened since the last
-	 * emergency trigger for 2*HZ window. We can't derive the status
-	 * of the low memory killer here. So, before falling back to OOM,
-	 * check for any +ve adj tasks left in the system in repeat for
-	 * next 20*HZ. Indirectly the below logic also giving 20HZ window
-	 * for the first emergency trigger.
-	 */
-	if (time_after(psi_emergency_jiffies + 20 * HZ, now) &&
-	    ulmk_kill_possible()) {
-		ret = true;
-		goto out;
-	}
-
-out:
 	mutex_unlock(&ulmk_retry_lock);
 	return ret;
 }
 
+void ulmk_watchdog_fn(struct timer_list *t)
+{
+	atomic64_set(&ulmk_wdog_expired, 1);
+	BUG_ON(ulmk_dbg_policy & ULMK_DBG_POLICY_WDOG);
+}
+
+void ulmk_watchdog_pet(struct timer_list *t)
+{
+	del_timer_sync(t);
+	atomic64_set(&ulmk_wdog_expired, 0);
+}
+
 void ulmk_update_last_kill(void)
 {
 	atomic64_set(&ulmk_kill_jiffies, jiffies);
@@ -290,7 +337,8 @@ static bool is_dump_unreclaim_slabs(void)
  * task consuming the most memory to avoid subsequent oom failures.
  */
 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
-			  const nodemask_t *nodemask, unsigned long totalpages)
+			  const nodemask_t *nodemask, unsigned long totalpages,
+			  bool only_positive_adj)
 {
 	long points;
 	long adj;
@@ -309,6 +357,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 	 */
 	adj = (long)p->signal->oom_score_adj;
 	if (adj == OOM_SCORE_ADJ_MIN ||
+			(only_positive_adj && adj < 0) ||
 			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
 			in_vfork(p)) {
 		task_unlock(p);
@@ -430,7 +479,8 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
 		goto select;
 	}
 
-	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
+	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages,
+				oc->only_positive_adj);
 	if (!points || points < oc->chosen_points)
 		goto next;
 
@@ -966,11 +1016,12 @@ static void __oom_kill_process(struct task_struct *victim)
 	 */
 	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, PIDTYPE_TGID);
 	mark_oom_victim(victim);
-	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
+	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB oom_score_adj=%hd\n",
 		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
 		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
 		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
-		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
+		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)),
+		p->signal->oom_score_adj);
 	task_unlock(victim);
 
 	/*
@@ -1028,7 +1079,8 @@ static int oom_kill_memcg_member(struct task_struct *task, void *unused)
 	return 0;
 }
 
-static void oom_kill_process(struct oom_control *oc, const char *message)
+static void oom_kill_process(struct oom_control *oc, const char *message,
+				bool quiet)
 {
 	struct task_struct *p = oc->chosen;
 	unsigned int points = oc->chosen_points;
@@ -1055,7 +1107,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 	}
 	task_unlock(p);
 
-	if (__ratelimit(&oom_rs))
+	if (!quiet && __ratelimit(&oom_rs))
 		dump_header(oc, p);
 
 	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
@@ -1085,7 +1137,8 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 			 * oom_badness() returns 0 if the thread is unkillable
 			 */
 			child_points = oom_badness(child,
-				oc->memcg, oc->nodemask, oc->totalpages);
+				oc->memcg, oc->nodemask, oc->totalpages,
+				oc->only_positive_adj);
 			if (child_points > victim_points) {
 				put_task_struct(victim);
 				victim = child;
@@ -1134,7 +1187,7 @@ static void check_panic_on_oom(struct oom_control *oc,
 			return;
 	}
 	/* Do not panic for oom kills triggered by sysrq */
-	if (is_sysrq_oom(oc))
+	if (is_sysrq_oom(oc) || oc->only_positive_adj)
 		return;
 	dump_header(oc, NULL);
 	panic("Out of memory: %s panic_on_oom is enabled\n",
@@ -1220,7 +1273,8 @@ bool out_of_memory(struct oom_control *oc)
 	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
 		get_task_struct(current);
 		oc->chosen = current;
-		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
+		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)",
+				 false);
 		return true;
 	}
 
@@ -1234,12 +1288,14 @@ bool out_of_memory(struct oom_control *oc)
 		 * system level, we cannot survive this and will enter
 		 * an endless loop in the allocator. Bail out now.
 		 */
-		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
+		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc) &&
+		    !oc->only_positive_adj)
 			panic("System is deadlocked on memory\n");
 	}
 	if (oc->chosen && oc->chosen != (void *)-1UL)
 		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
-				 "Memory cgroup out of memory");
+				 "Memory cgroup out of memory",
+			IS_ENABLED(CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER));
 	return !!oc->chosen;
 }
 
@@ -1299,3 +1355,18 @@ void add_to_oom_reaper(struct task_struct *p)
 
 	put_task_struct(p);
 }
+
+/*
+ * Should be called prior to sending sigkill. To guarantee that the
+ * process to-be-killed is still untouched.
+ */
+void check_panic_on_foreground_kill(struct task_struct *p)
+{
+	if (unlikely(!strcmp(current->comm, ULMK_MAGIC)
+			&& p->signal->oom_score_adj == 0
+			&& panic_on_adj_zero)) {
+		show_mem(SHOW_MEM_FILTER_NODES, NULL);
+		show_mem_call_notifiers();
+		panic("Attempt to kill foreground task: %s", p->comm);
+	}
+}
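The ulmk_dbg_policy bits decide which of the three failure modes described in the should_ulmk_retry() comment crashes the system immediately instead of taking the fallback path. A short sketch of the mapping, condensed from the branches above (the helper and the sysfs path are assumptions; the parameter name is taken from the module_param above):

	#include <linux/types.h>

	/*
	 * ULMK_DBG_POLICY_TRIGGER      - OOM hit while no PSI trigger was active
	 *                                (case 1: no event ever reached userspace)
	 * ULMK_DBG_POLICY_WDOG         - a per-trigger watchdog expired
	 *                                (case 2: event sent but never consumed)
	 * ULMK_DBG_POLICY_POSITIVE_ADJ - no positive-adj victim could be found
	 *                                (case 3: userspace chose not to kill)
	 * ULMK_DBG_POLICY_ALL (0x7)    - crash on any of the above
	 */
	static bool demo_should_crash(unsigned int policy, unsigned int failure_bit)
	{
		return policy & failure_bit;	/* each branch does BUG_ON(this) */
	}

	/* e.g., assuming the built-in parameter lands under the oom_kill namespace:
	 *   echo 7 > /sys/module/oom_kill/parameters/ulmk_dbg_policy
	 */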
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5aaf71d6b..fe70c43 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4582,7 +4582,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 				&compaction_retries))
 		goto retry;
 
-	if (order <= PAGE_ALLOC_COSTLY_ORDER && should_ulmk_retry())
+	if (order <= PAGE_ALLOC_COSTLY_ORDER && should_ulmk_retry(gfp_mask))
 		goto retry;
 
 	/* Deal with possible cpuset update races before we start OOM killing */
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 30abd62..a4a603e3 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -304,7 +304,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 	 * not matter as the mixed block count will still be correct
 	 */
 	for (; pfn < end_pfn; ) {
-		if (!pfn_valid(pfn)) {
+		page = pfn_to_online_page(pfn);
+		if (!page) {
 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
 			continue;
 		}
@@ -312,13 +313,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
-		page = pfn_to_page(pfn);
 		pageblock_mt = get_pageblock_migratetype(page);
 
 		for (; pfn < block_end_pfn; pfn++) {
 			if (!pfn_valid_within(pfn))
 				continue;
 
+			/* The pageblock is online, no need to recheck. */
 			page = pfn_to_page(pfn);
 
 			if (page_zone(page) != zone)
diff --git a/mm/slub.c b/mm/slub.c
index 755d155..9e5a7e6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4811,7 +4811,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		}
 	}
 
-	get_online_mems();
+	/*
+	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
+	 * already held which will conflict with an existing lock order:
+	 *
+	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
+	 *
+	 * We don't really need mem_hotplug_lock (to hold off
+	 * slab_mem_going_offline_callback) here because slab's memory hot
+	 * unplug code doesn't destroy the kmem_cache->node[] data.
+	 */
+
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		struct kmem_cache_node *n;
@@ -4852,7 +4862,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);
 #endif
-	put_online_mems();
 	kfree(nodes);
 	return x + sprintf(buf + x, "\n");
 }
diff --git a/mm/usercopy.c b/mm/usercopy.c
index ac85aeb..45a6eac 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -15,6 +15,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
@@ -237,7 +238,12 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (!virt_addr_valid(ptr))
 		return;
 
-	page = virt_to_head_page(ptr);
+	/*
+	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
+	 * highmem page or fallback to virt_to_page(). The following
+	 * is effectively a highmem-aware virt_to_head_page().
+	 */
+	page = compound_head(kmap_to_page((void *)ptr));
 
 	if (PageSlab(page)) {
 		/* Check slab allocator for flags and size. */
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 45dcb4f..cf75fca 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -463,6 +463,9 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
  * "hierarchy" or "local").
  *
  * To be used as memcg event method.
+ *
+ * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
+ * not be parsed.
  */
 int vmpressure_register_event(struct mem_cgroup *memcg,
 			      struct eventfd_ctx *eventfd, const char *args)
@@ -470,7 +473,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
 	struct vmpressure_event *ev;
 	enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
-	enum vmpressure_levels level = -1;
+	enum vmpressure_levels level;
 	char *spec, *spec_orig;
 	char *token;
 	int ret = 0;
@@ -483,20 +486,18 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 
 	/* Find required level */
 	token = strsep(&spec, ",");
-	level = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
-	if (level < 0) {
-		ret = level;
+	ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
+	if (ret < 0)
 		goto out;
-	}
+	level = ret;
 
 	/* Find optional mode */
 	token = strsep(&spec, ",");
 	if (token) {
-		mode = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
-		if (mode < 0) {
-			ret = mode;
+		ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
+		if (ret < 0)
 			goto out;
-		}
+		mode = ret;
 	}
 
 	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -512,6 +513,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 	mutex_lock(&vmpr->events_lock);
 	list_add(&ev->node, &vmpr->events);
 	mutex_unlock(&vmpr->events_lock);
+	ret = 0;
 out:
 	kfree(spec_orig);
 	return ret;
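The vmpressure fix stops storing match_string()'s return value directly in an enum: the enum may be unsigned, in which case a negative error code can never satisfy a '< 0' check. A minimal sketch of the safe pattern, with hypothetical names:

	#include <linux/string.h>

	enum demo_level { DEMO_LOW, DEMO_MEDIUM, DEMO_CRITICAL, DEMO_NUM_LEVELS };

	static const char * const demo_level_names[] = { "low", "medium", "critical" };

	static int demo_parse_level(const char *token, enum demo_level *level)
	{
		/* Keep the result in an int so the error check actually works. */
		int ret = match_string(demo_level_names, DEMO_NUM_LEVELS, token);

		if (ret < 0)
			return ret;

		*level = ret;	/* assign to the enum only once it is known valid */
		return 0;
	}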
diff --git a/net/9p/client.c b/net/9p/client.c
index b615aae..d62f83f 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -296,6 +296,7 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
 
 	p9pdu_reset(&req->tc);
 	p9pdu_reset(&req->rc);
+	req->t_err = 0;
 	req->status = REQ_STATUS_ALLOC;
 	init_waitqueue_head(&req->wq);
 	INIT_LIST_HEAD(&req->req_list);
diff --git a/net/Kconfig b/net/Kconfig
index f46a913..f1704f5 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -293,7 +293,6 @@
 	bool "enable BPF Just In Time compiler"
 	depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
 	depends on MODULES
-	depends on !CFI
 	---help---
 	  Berkeley Packet Filter filtering capabilities are normally handled
 	  by an interpreter. This option allows kernel to generate a native
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7065d68..69127f60 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1476,7 +1476,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
 	prev = cmpxchg(p, orig, rt);
 	if (prev == orig) {
 		if (orig) {
-			dst_dev_put(&orig->dst);
+			rt_add_uncached_list(orig);
 			dst_release(&orig->dst);
 		}
 	} else {
@@ -2381,14 +2381,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
 	int orig_oif = fl4->flowi4_oif;
 	unsigned int flags = 0;
 	struct rtable *rth;
-	int err = -ENETUNREACH;
+	int err;
 
 	if (fl4->saddr) {
-		rth = ERR_PTR(-EINVAL);
 		if (ipv4_is_multicast(fl4->saddr) ||
 		    ipv4_is_lbcast(fl4->saddr) ||
-		    ipv4_is_zeronet(fl4->saddr))
+		    ipv4_is_zeronet(fl4->saddr)) {
+			rth = ERR_PTR(-EINVAL);
 			goto out;
+		}
+
+		rth = ERR_PTR(-ENETUNREACH);
 
 		/* I removed check for oif == dev_out->oif here.
 		   It was wrong for two reasons:
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 886d650..a925d82 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -80,8 +80,10 @@ static void ip6_sublist_rcv_finish(struct list_head *head)
 {
 	struct sk_buff *skb, *next;
 
-	list_for_each_entry_safe(skb, next, head, list)
+	list_for_each_entry_safe(skb, next, head, list) {
+		skb_list_del_init(skb);
 		dst_input(skb);
+	}
 }
 
 static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index b919db0..8cc0e5a 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -382,7 +382,7 @@ static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
 	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 	struct bpf_prog *prog = psock->bpf_prog;
 
-	return (*prog->bpf_func)(skb, prog->insnsi);
+	return BPF_PROG_RUN(prog, skb);
 }
 
 static int kcm_read_sock_done(struct strparser *strp, int err)
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index d37d4ac..316250a 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -490,9 +490,14 @@ static ssize_t ieee80211_if_fmt_aqm(
 	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
 {
 	struct ieee80211_local *local = sdata->local;
-	struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+	struct txq_info *txqi;
 	int len;
 
+	if (!sdata->vif.txq)
+		return 0;
+
+	txqi = to_txq_info(sdata->vif.txq);
+
 	spin_lock_bh(&local->fq.lock);
 	rcu_read_lock();
 
@@ -659,7 +664,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
 	DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
 	DEBUGFS_ADD(hw_queues);
 
-	if (sdata->local->ops->wake_tx_queue)
+	if (sdata->local->ops->wake_tx_queue &&
+	    sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+	    sdata->vif.type != NL80211_IFTYPE_NAN)
 		DEBUGFS_ADD(aqm);
 }
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index dbd9a31..26e8fa7 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2554,7 +2554,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
 
 	rcu_read_lock();
 	ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
-	if (WARN_ON_ONCE(ssid == NULL))
+	if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+		      "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
 		ssid_len = 0;
 	else
 		ssid_len = ssid[1];
@@ -5039,7 +5040,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
 	rcu_read_lock();
 	ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
-	if (!ssidie) {
+	if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
 		rcu_read_unlock();
 		kfree(assoc_data);
 		return -EINVAL;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2145581..24fddf0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3429,8 +3429,11 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 			      NFT_SET_OBJECT))
 			return -EINVAL;
 		/* Only one of these operations is supported */
-		if ((flags & (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) ==
-			     (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT))
+		if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
+			     (NFT_SET_MAP | NFT_SET_OBJECT))
+			return -EOPNOTSUPP;
+		if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
+			     (NFT_SET_EVAL | NFT_SET_OBJECT))
 			return -EOPNOTSUPP;
 	}
 
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index af1497a..69d6173 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
 static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
 {
 	struct nft_connlimit *priv = nft_expr_priv(expr);
+	bool ret;
 
-	return nf_conncount_gc_list(net, &priv->list);
+	local_bh_disable();
+	ret = nf_conncount_gc_list(net, &priv->list);
+	local_bh_enable();
+
+	return ret;
 }
 
 static struct nft_expr_type nft_connlimit_type;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 161c345..55754d9 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -76,9 +76,6 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
 	if (IS_ERR(set))
 		return PTR_ERR(set);
 
-	if (set->flags & NFT_SET_EVAL)
-		return -EOPNOTSUPP;
-
 	priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
 	err = nft_validate_register_load(priv->sreg, set->klen);
 	if (err < 0)
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
index 24b7742..c427244 100644
--- a/net/netfilter/xt_quota2.c
+++ b/net/netfilter/xt_quota2.c
@@ -296,8 +296,8 @@ static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
 	}
 
 	list_del(&e->list);
-	remove_proc_entry(e->name, proc_xt_quota);
 	spin_unlock_bh(&counter_list_lock);
+	remove_proc_entry(e->name, proc_xt_quota);
 	kfree(e);
 }
 
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 5651bf6..5a81dfb4 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -400,11 +400,13 @@ static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
 	src.sq_port = le32_to_cpu(pkt.client.port);
 	key = (u64)src.sq_node << 32 | src.sq_port;
 
-	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
-	if (!flow)
-		return;
-
 	mutex_lock(&node->qrtr_tx_lock);
+	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
+	if (!flow) {
+		mutex_unlock(&node->qrtr_tx_lock);
+		return;
+	}
+
 	atomic_set(&flow->pending, 0);
 	wake_up_interruptible_all(&node->resume_tx);
 
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 7c4a4b8..f2c4bfc 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1307,11 +1307,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
 			  struct netlink_ext_ack *extack)
 {
 	size_t attr_size = 0;
-	int ret = 0;
+	int loop, ret;
 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
-	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
-			      &attr_size, true, extack);
+	for (loop = 0; loop < 10; loop++) {
+		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
+				      actions, &attr_size, true, extack);
+		if (ret != -EAGAIN)
+			break;
+	}
+
 	if (ret < 0)
 		return ret;
 	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
@@ -1361,11 +1366,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
 		 */
 		if (n->nlmsg_flags & NLM_F_REPLACE)
 			ovr = 1;
-replay:
 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
 				     extack);
-		if (ret == -EAGAIN)
-			goto replay;
 		break;
 	case RTM_DELACTION:
 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
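The act_api change turns an unbounded replay on -EAGAIN into a retry loop capped at ten attempts, so an action module that never becomes ready cannot wedge the netlink handler. A minimal sketch of the bounded-retry shape, with a hypothetical callback:

	#include <linux/errno.h>

	/* 'op' is any operation that may transiently return -EAGAIN. */
	static int demo_retry_bounded(int (*op)(void))
	{
		int loop, ret = -EAGAIN;

		for (loop = 0; loop < 10; loop++) {
			ret = op();
			if (ret != -EAGAIN)
				break;		/* success or a hard error */
		}

		return ret;	/* at worst, -EAGAIN after ten attempts */
	}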
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9f5b4e5..227b050 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8957,7 +8957,7 @@ struct proto sctp_prot = {
 	.backlog_rcv =	sctp_backlog_rcv,
 	.hash        =	sctp_hash,
 	.unhash      =	sctp_unhash,
-	.get_port    =	sctp_get_port,
+	.no_autobind =	true,
 	.obj_size    =  sizeof(struct sctp_sock),
 	.useroffset  =  offsetof(struct sctp_sock, subscribe),
 	.usersize    =  offsetof(struct sctp_sock, initmsg) -
@@ -8999,7 +8999,7 @@ struct proto sctpv6_prot = {
 	.backlog_rcv	= sctp_backlog_rcv,
 	.hash		= sctp_hash,
 	.unhash		= sctp_unhash,
-	.get_port	= sctp_get_port,
+	.no_autobind	= true,
 	.obj_size	= sizeof(struct sctp6_sock),
 	.useroffset	= offsetof(struct sctp6_sock, sctp.subscribe),
 	.usersize	= offsetof(struct sctp6_sock, sctp.initmsg) -
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2d76663..e7e7ffc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -200,6 +200,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
 	return __cfg80211_rdev_from_attrs(netns, info->attrs);
 }
 
+static int validate_beacon_head(const struct nlattr *attr,
+				struct netlink_ext_ack *extack)
+{
+	const u8 *data = nla_data(attr);
+	unsigned int len = nla_len(attr);
+	const struct element *elem;
+	const struct ieee80211_mgmt *mgmt = (void *)data;
+	unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
+					 u.beacon.variable);
+
+	if (len < fixedlen)
+		goto err;
+
+	if (ieee80211_hdrlen(mgmt->frame_control) !=
+	    offsetof(struct ieee80211_mgmt, u.beacon))
+		goto err;
+
+	data += fixedlen;
+	len -= fixedlen;
+
+	for_each_element(elem, data, len) {
+		/* nothing */
+	}
+
+	if (for_each_element_completed(elem, data, len))
+		return 0;
+
+err:
+	NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
+	return -EINVAL;
+}
+
 /* policy for the attributes */
 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 	[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -2311,6 +2343,8 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 
 	control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
 
+	memset(chandef, 0, sizeof(*chandef));
+
 	chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
 	chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
 	chandef->center_freq1 = control_freq;
@@ -2843,7 +2877,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
 	if (rdev->ops->get_channel) {
 		int ret;
-		struct cfg80211_chan_def chandef;
+		struct cfg80211_chan_def chandef = {};
 
 		ret = rdev_get_channel(rdev, wdev, &chandef);
 		if (ret == 0) {
@@ -4038,6 +4072,12 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
 	memset(bcn, 0, sizeof(*bcn));
 
 	if (attrs[NL80211_ATTR_BEACON_HEAD]) {
+		int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD],
+					       NULL);
+
+		if (ret)
+			return ret;
+
 		bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
 		bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
 		if (!bcn->head_len)
@@ -5832,6 +5872,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
 	if (!rdev->ops->del_mpath)
 		return -EOPNOTSUPP;
 
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+		return -EOPNOTSUPP;
+
 	return rdev_del_mpath(rdev, dev, dst);
 }
 
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 22b7a9b..42bd15c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2137,7 +2137,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
 
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-	struct cfg80211_chan_def chandef;
+	struct cfg80211_chan_def chandef = {};
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	enum nl80211_iftype iftype;
 
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 06943d9..4f0cfb8 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -800,7 +800,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-	struct cfg80211_chan_def chandef;
+	struct cfg80211_chan_def chandef = {};
 	int ret;
 
 	switch (wdev->iftype) {
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index c67d7a82..73fd0ea 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
 			       struct iw_point *data, char *ssid)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	int ret = 0;
 
 	/* call only for station! */
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
 		if (ie) {
 			data->flags = 1;
 			data->length = ie[1];
-			memcpy(ssid, ie + 2, data->length);
+			if (data->length > IW_ESSID_MAX_SIZE)
+				ret = -EINVAL;
+			else
+				memcpy(ssid, ie + 2, data->length);
 		}
 		rcu_read_unlock();
 	} else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
 	}
 	wdev_unlock(wdev);
 
-	return 0;
+	return ret;
 }
 
 int cfg80211_mgd_wext_siwap(struct net_device *dev,
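The wext fix is the usual "validate a frame-supplied length against the destination buffer before memcpy" pattern: the SSID element length comes from the BSS IEs, while the destination is the fixed IW_ESSID_MAX_SIZE buffer handed in by the ioctl. A minimal sketch of the shape, with hypothetical names:

	#include <linux/types.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	#define DEMO_SSID_MAX 32	/* stands in for IW_ESSID_MAX_SIZE */

	static int demo_copy_ssid(char *dst, const u8 *ssid_elem)
	{
		u8 len = ssid_elem[1];		/* length byte taken from the frame */

		if (len > DEMO_SSID_MAX)
			return -EINVAL;		/* reject instead of overflowing dst */

		memcpy(dst, ssid_elem + 2, len);
		return len;
	}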
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a5fe929..f49524e 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2817,7 +2817,7 @@
 # Check for line lengths > 75 in commit log, warn once
 		if ($in_commit_log && !$commit_log_long_line &&
 		    length($line) > 75 &&
-		    !($line =~ /^\s*[a-zA-Z0-9_\/\.\-]+\s+\|\s+\d+/ ||
+		    !($line =~ /^\s*[a-zA-Z0-9_\/\.\-\,]+\s+\|\s+\d+/ ||
 					# file delta changes
 		      $line =~ /^\s*(?:[\w\.\-]+\/)++[\w\.\-]+:/ ||
 					# filename then :
diff --git a/scripts/namespace.pl b/scripts/namespace.pl
index 6135574..1da7bca 100755
--- a/scripts/namespace.pl
+++ b/scripts/namespace.pl
@@ -65,13 +65,14 @@
 use warnings;
 use strict;
 use File::Find;
+use File::Spec;
 
 my $nm = ($ENV{'NM'} || "nm") . " -p";
 my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
-my $srctree = "";
-my $objtree = "";
-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
+my $srctree = File::Spec->curdir();
+my $objtree = File::Spec->curdir();
+$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
+$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
 
 if ($#ARGV != -1) {
 	print STDERR "usage: $0 takes no parameters\n";
@@ -231,9 +232,9 @@
 	}
 	($source = $basename) =~ s/\.o$//;
 	if (-e "$source.c" || -e "$source.S") {
-		$source = "$objtree$File::Find::dir/$source";
+		$source = File::Spec->catfile($objtree, $File::Find::dir, $source)
 	} else {
-		$source = "$srctree$File::Find::dir/$source";
+		$source = File::Spec->catfile($srctree, $File::Find::dir, $source)
 	}
 	if (! -e "$source.c" && ! -e "$source.S") {
 		# No obvious source, exclude the object if it is conglomerate
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index d9e7728..f63b4bd 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -271,8 +271,16 @@ static int ima_calc_file_hash_atfm(struct file *file,
 		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
 		rc = integrity_kernel_read(file, offset, rbuf[active],
 					   rbuf_len);
-		if (rc != rbuf_len)
+		if (rc != rbuf_len) {
+			if (rc >= 0)
+				rc = -EINVAL;
+			/*
+			 * Forward current rc, do not overwrite with return value
+			 * from ahash_wait()
+			 */
+			ahash_wait(ahash_rc, &wait);
 			goto out3;
+		}
 
 		if (rbuf[1] && offset) {
 			/* Using two buffers, and it is not the first
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 109ab51..b1a9ac9 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -100,7 +100,7 @@
 #include "audit.h"
 #include "avc_ss.h"
 
-struct selinux_state selinux_state;
+struct selinux_state selinux_state __rticdata;
 
 /* SECMARK reference count */
 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 107ec7f3..c827a2a 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3264,6 +3264,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
 		nvhdmi_chmap_cea_alloc_validate_get_type;
 	spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
 
+	codec->link_down_at_suspend = 1;
+
 	return 0;
 }
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e1b08d6..dd46354 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -405,6 +405,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
 	case 0x10ec0700:
 	case 0x10ec0701:
 	case 0x10ec0703:
+	case 0x10ec0711:
 		alc_update_coef_idx(codec, 0x10, 1<<15, 0);
 		break;
 	case 0x10ec0662:
@@ -5676,6 +5677,7 @@ enum {
 	ALC225_FIXUP_WYSE_AUTO_MUTE,
 	ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
 	ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+	ALC256_FIXUP_ASUS_HEADSET_MIC,
 	ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
 	ALC299_FIXUP_PREDATOR_SPK,
 	ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
@@ -6692,6 +6694,15 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
 	},
+	[ALC256_FIXUP_ASUS_HEADSET_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x03a11020 }, /* headset mic with jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+	},
 	[ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -6888,6 +6899,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
 	SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7752,6 +7764,7 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0700:
 	case 0x10ec0701:
 	case 0x10ec0703:
+	case 0x10ec0711:
 		spec->codec_variant = ALC269_TYPE_ALC700;
 		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
@@ -8883,6 +8896,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
 	HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
 	HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
 	HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 18cddf1..64a52d4 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -31,6 +31,13 @@
 #define SGTL5000_DAP_REG_OFFSET	0x0100
 #define SGTL5000_MAX_REG_OFFSET	0x013A
 
+/* Delay for the VAG ramp up */
+#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
+/* Delay for the VAG ramp down */
+#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
+
+#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
+
 /* default value of sgtl5000 registers */
 static const struct reg_default sgtl5000_reg_defaults[] = {
 	{ SGTL5000_CHIP_DIG_POWER,		0x0000 },
@@ -116,6 +123,13 @@ enum  {
 	I2S_LRCLK_STRENGTH_HIGH,
 };
 
+enum {
+	HP_POWER_EVENT,
+	DAC_POWER_EVENT,
+	ADC_POWER_EVENT,
+	LAST_POWER_EVENT = ADC_POWER_EVENT
+};
+
 /* sgtl5000 private structure in codec */
 struct sgtl5000_priv {
 	int sysclk;	/* sysclk rate */
@@ -129,8 +143,109 @@ struct sgtl5000_priv {
 	u8 micbias_resistor;
 	u8 micbias_voltage;
 	u8 lrclk_strength;
+	u16 mute_state[LAST_POWER_EVENT + 1];
 };
 
+static inline int hp_sel_input(struct snd_soc_component *component)
+{
+	return (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_CTRL) &
+		SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
+}
+
+static inline u16 mute_output(struct snd_soc_component *component,
+			      u16 mute_mask)
+{
+	u16 mute_reg = snd_soc_component_read32(component,
+					      SGTL5000_CHIP_ANA_CTRL);
+
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+			    mute_mask, mute_mask);
+	return mute_reg;
+}
+
+static inline void restore_output(struct snd_soc_component *component,
+				  u16 mute_mask, u16 mute_reg)
+{
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+		mute_mask, mute_reg);
+}
+
+static void vag_power_on(struct snd_soc_component *component, u32 source)
+{
+	if (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
+	    SGTL5000_VAG_POWERUP)
+		return;
+
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+			    SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
+
+	/* When powering up VAG to get a local loop from Line-In, a sleep
+	 * is required to avoid a loud pop.
+	 */
+	if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
+	    source == HP_POWER_EVENT)
+		msleep(SGTL5000_VAG_POWERUP_DELAY);
+}
+
+static int vag_power_consumers(struct snd_soc_component *component,
+			       u16 ana_pwr_reg, u32 source)
+{
+	int consumers = 0;
+
+	/* count dac/adc consumers unconditional */
+	if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
+		consumers++;
+	if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
+		consumers++;
+
+	/*
+	 * If the event comes from HP and Line-In is selected,
+	 * current action is 'DAC to be powered down'.
+	 * As HP_POWERUP is not set when HP muxed to line-in,
+	 * we need to keep VAG power ON.
+	 */
+	if (source == HP_POWER_EVENT) {
+		if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
+			consumers++;
+	} else {
+		if (ana_pwr_reg & SGTL5000_HP_POWERUP)
+			consumers++;
+	}
+
+	return consumers;
+}
+
+static void vag_power_off(struct snd_soc_component *component, u32 source)
+{
+	u16 ana_pwr = snd_soc_component_read32(component,
+					     SGTL5000_CHIP_ANA_POWER);
+
+	if (!(ana_pwr & SGTL5000_VAG_POWERUP))
+		return;
+
+	/*
+	 * This function is called when one of the VAG power consumers is
+	 * disappearing. Thus, if there is more than one consumer at the
+	 * moment, at least one consumer will definitely remain after the
+	 * current event ends.
+	 * Don't clear VAG_POWERUP if 2 or more consumers of VAG are present:
+	 * - LINE_IN (for HP events) / HP (for DAC/ADC events)
+	 * - DAC
+	 * - ADC
+	 * (the current consumer is disappearing right now)
+	 */
+	if (vag_power_consumers(component, ana_pwr, source) >= 2)
+		return;
+
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+		SGTL5000_VAG_POWERUP, 0);
+	/* In the power-down case we need to wait 400-1000 ms
+	 * for VAG to ramp down fully.
+	 * The longer we wait, the smaller the pop.
+	 */
+	msleep(SGTL5000_VAG_POWERDOWN_DELAY);
+}
+
 /*
  * mic_bias power on/off share the same register bits with
  * output impedance of mic bias, when power on mic bias, we
@@ -162,36 +277,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
 	return 0;
 }
 
-/*
- * As manual described, ADC/DAC only works when VAG powerup,
- * So enabled VAG before ADC/DAC up.
- * In power down case, we need wait 400ms when vag fully ramped down.
- */
-static int power_vag_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
+static int vag_and_mute_control(struct snd_soc_component *component,
+				 int event, int event_source)
 {
-	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
-	const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+	static const u16 mute_mask[] = {
+		/*
+		 * Mask for HP_POWER_EVENT.
+		 * Muxing the headphones has to be wrapped with a
+		 * mute/unmute of the headphones only.
+		 */
+		SGTL5000_HP_MUTE,
+		/*
+		 * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
+		 * Muxing the DAC or ADC block has to be wrapped with a
+		 * mute/unmute of both the headphones and line-out.
+		 */
+		SGTL5000_OUTPUTS_MUTE,
+		SGTL5000_OUTPUTS_MUTE
+	};
+
+	struct sgtl5000_priv *sgtl5000 =
+		snd_soc_component_get_drvdata(component);
 
 	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
-			SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
-		msleep(400);
+	case SND_SOC_DAPM_PRE_PMU:
+		sgtl5000->mute_state[event_source] =
+			mute_output(component, mute_mask[event_source]);
 		break;
-
+	case SND_SOC_DAPM_POST_PMU:
+		vag_power_on(component, event_source);
+		restore_output(component, mute_mask[event_source],
+			       sgtl5000->mute_state[event_source]);
+		break;
 	case SND_SOC_DAPM_PRE_PMD:
-		/*
-		 * Don't clear VAG_POWERUP, when both DAC and ADC are
-		 * operational to prevent inadvertently starving the
-		 * other one of them.
-		 */
-		if ((snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
-				mask) != mask) {
-			snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
-				SGTL5000_VAG_POWERUP, 0);
-			msleep(400);
-		}
+		sgtl5000->mute_state[event_source] =
+			mute_output(component, mute_mask[event_source]);
+		vag_power_off(component, event_source);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		restore_output(component, mute_mask[event_source],
+			       sgtl5000->mute_state[event_source]);
 		break;
 	default:
 		break;
@@ -200,6 +325,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
 	return 0;
 }
 
+/*
+ * Mute the headphone output while powering it up/down.
+ * Control VAG power on the HP power path.
+ */
+static int headphone_pga_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *component =
+		snd_soc_dapm_to_component(w->dapm);
+
+	return vag_and_mute_control(component, event, HP_POWER_EVENT);
+}
+
+/* As the manual describes, powering the ADC/DAC up or down requires
+ * the outputs to be muted to avoid pops.
+ * Control VAG power on the ADC/DAC power path.
+ */
+static int adc_updown_depop(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *component =
+		snd_soc_dapm_to_component(w->dapm);
+
+	return vag_and_mute_control(component, event, ADC_POWER_EVENT);
+}
+
+static int dac_updown_depop(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *component =
+		snd_soc_dapm_to_component(w->dapm);
+
+	return vag_and_mute_control(component, event, DAC_POWER_EVENT);
+}
+
 /* input sources for ADC */
 static const char *adc_mux_text[] = {
 	"MIC_IN", "LINE_IN"
@@ -272,7 +432,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
 			    mic_bias_event,
 			    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
-	SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
+	SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
+			   headphone_pga_event,
+			   SND_SOC_DAPM_PRE_POST_PMU |
+			   SND_SOC_DAPM_PRE_POST_PMD),
 	SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
 
 	SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
@@ -293,11 +456,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
 				0, SGTL5000_CHIP_DIG_POWER,
 				1, 0),
 
-	SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
-	SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
-
-	SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
-	SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
+	SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
+			   adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+			   SND_SOC_DAPM_PRE_POST_PMD),
+	SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
+			   dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+			   SND_SOC_DAPM_PRE_POST_PMD),
 };
 
 /* routes for sgtl5000 */
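For reference, the VAG handling added above boils down to a consumer count: VAG_POWERUP is cleared only when the consumer that is going away is the last one left. A standalone sketch of that rule (not driver code; the register bit values and the main() harness are invented for illustration, and the real driver reads CHIP_ANA_POWER instead):

#include <stdio.h>

#define DAC_POWERUP	(1 << 0)
#define ADC_POWERUP	(1 << 1)
#define HP_POWERUP	(1 << 2)

enum { HP_POWER_EVENT, DAC_POWER_EVENT, ADC_POWER_EVENT };

/* mirrors vag_power_consumers(): count everything that still needs VAG */
static int consumers(unsigned int ana_pwr, int source, int hp_muxed_to_line_in)
{
	int n = 0;

	if (ana_pwr & DAC_POWERUP)
		n++;
	if (ana_pwr & ADC_POWERUP)
		n++;
	/* HP muxed to Line-In keeps VAG busy even with HP_POWERUP clear */
	if (source == HP_POWER_EVENT ? hp_muxed_to_line_in :
	    !!(ana_pwr & HP_POWERUP))
		n++;

	return n;
}

int main(void)
{
	/* DAC going down while HP is still up: 2 consumers, keep VAG on */
	printf("keep VAG: %d\n",
	       consumers(DAC_POWERUP | HP_POWERUP, DAC_POWER_EVENT, 0) >= 2);
	/* DAC going down and nothing else left: safe to power VAG off */
	printf("keep VAG: %d\n",
	       consumers(DAC_POWERUP, DAC_POWER_EVENT, 0) >= 2);
	return 0;
}

In the driver, vag_power_off() makes the same ">= 2" check against the live register, and only then drops VAG_POWERUP and sleeps through the ramp-down.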
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index d23c2bb..15a3182 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -674,6 +674,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	}
 
 	/* set format */
+	rdai->bit_clk_inv = 0;
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
 		rdai->sys_delay = 0;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 28c88c1..5e67b99 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -420,6 +420,9 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
 		ep = 0x81;
 		ifnum = 1;
 		goto add_sync_ep_from_ifnum;
+	case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
+		/* BOSS Katana amplifiers do not need quirks */
+		return 0;
 	}
 
 	if (attr == USB_ENDPOINT_SYNC_ASYNC &&
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 95a43cc..bca0c9e 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -259,8 +259,8 @@
 
 define do_generate_dynamic_list_file
 	symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
-	xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
-	if [ "$$symbol_type" = "U W w" ];then				\
+	xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
+	if [ "$$symbol_type" = "U W" ];then				\
 		(echo '{';						\
 		$(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
 		echo '};';						\
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 6ccfd13..382e476 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -254,10 +254,10 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
 		errno = ENOMEM;
 		return -1;
 	}
+	pevent->cmdlines = cmdlines;
 
 	cmdlines[pevent->cmdline_count].comm = strdup(comm);
 	if (!cmdlines[pevent->cmdline_count].comm) {
-		free(cmdlines);
 		errno = ENOMEM;
 		return -1;
 	}
@@ -268,7 +268,6 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
 		pevent->cmdline_count++;
 
 	qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
-	pevent->cmdlines = cmdlines;
 
 	return 0;
 }
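The event-parse change above closes a stale-pointer window: once realloc() succeeds, the old cmdlines array may already be gone, so the owner has to be updated immediately, and the later strdup() failure path must not free() the live buffer. A minimal sketch of that ownership rule, with invented type and function names:

#include <stdlib.h>
#include <string.h>

struct comm_list {
	char **comms;
	size_t count;
};

static int comm_list_add(struct comm_list *list, const char *comm)
{
	char **grown = realloc(list->comms, (list->count + 1) * sizeof(*grown));

	if (!grown)
		return -1;
	list->comms = grown;		/* update the owner first */

	grown[list->count] = strdup(comm);
	if (!grown[list->count])
		return -1;		/* do NOT free(grown) here */

	list->count++;
	return 0;
}

The point matches the patch: once the owning structure holds the reallocated pointer, an error return leaves it consistent instead of dangling.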
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 849b3be..510caed 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -837,7 +837,7 @@
     JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
   else
     ifneq (,$(wildcard /usr/sbin/alternatives))
-      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
     endif
   endif
   ifndef JDIR
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
index 05920e3e..4735797 100644
--- a/tools/perf/arch/x86/util/unwind-libunwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <errno.h>
+#include "../../util/debug.h"
 #ifndef REMOTE_UNWIND_LIBUNWIND
 #include <libunwind.h>
 #include "perf_regs.h"
 #include "../../util/unwind.h"
-#include "../../util/debug.h"
 #endif
 
 #ifdef HAVE_ARCH_X86_64_SUPPORT
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7899625..6aae10f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -3090,8 +3090,11 @@ int cmd_stat(int argc, const char **argv)
 			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
 				run_idx + 1);
 
+		if (run_idx != 0)
+			perf_evlist__reset_prev_raw_counts(evsel_list);
+
 		status = run_perf_stat(argc, argv, run_idx);
-		if (forever && status != -1) {
+		if (forever && status != -1 && !interval) {
 			print_counters(NULL, argc, argv);
 			perf_stat__reset_stats();
 		}
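A rough illustration of why the new reset call is made between repeated runs (perf's real bookkeeping is per-cpu/per-thread; the single aggregate value below is invented): interval output prints the delta against the previous snapshot, and a snapshot left over from run N skews the first delta of run N+1.

#include <stdio.h>

struct counts { unsigned long long val; };

static unsigned long long read_delta(struct counts *prev,
				     unsigned long long raw)
{
	unsigned long long delta = raw - prev->val;

	prev->val = raw;
	return delta;
}

int main(void)
{
	struct counts prev = { 0 };

	read_delta(&prev, 1000);	/* run #1 ends with prev.val = 1000 */
	/* without a reset, run #2 starting near zero underflows the delta */
	printf("stale delta: %llu\n", read_delta(&prev, 100));
	prev.val = 0;			/* the added per-run reset */
	printf("fresh delta: %llu\n", read_delta(&prev, 100));
	return 0;
}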
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 0c70788..3c0d74fc1 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1114,7 +1114,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
 
 	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
 	if (sysfs__read_str(file, &cache->map, &len)) {
-		free(cache->map);
+		free(cache->size);
 		free(cache->type);
 		return -1;
 	}
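The header.c hunk above fixes an error-path mix-up: when reading shared_cpu_list fails, cache->map was never populated, while the previously read cache->size leaked. A sketch of the intended unwind order, with a stub standing in for sysfs__read_str() and invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache_level {
	char *type;
	char *size;
	char *map;
};

/* stub in place of sysfs__read_str(); the real code reads sysfs files */
static int read_attr(const char *name, char **out)
{
	*out = strdup(name);
	return *out ? 0 : -1;
}

static int cache_level__read(struct cache_level *c)
{
	if (read_attr("type", &c->type))
		return -1;
	if (read_attr("size", &c->size))
		goto err_type;
	if (read_attr("shared_cpu_list", &c->map))
		goto err_size;
	return 0;

err_size:
	free(c->size);	/* was allocated, must be released */
err_type:
	free(c->type);	/* never free c->map here: it was never set */
	return -1;
}

int main(void)
{
	struct cache_level c = { 0 };

	if (!cache_level__read(&c)) {
		printf("%s %s %s\n", c.type, c.size, c.map);
		free(c.type);
		free(c.size);
		free(c.map);
	}
	return 0;
}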
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index a186300..663e790 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -394,7 +394,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
 	size_t size;
 	u16 idr_size;
 	const char *sym;
-	uint32_t count;
+	uint64_t count;
 	int ret, csize, usize;
 	pid_t pid, tid;
 	struct {
@@ -417,7 +417,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
 		return -1;
 
 	filename = event->mmap2.filename;
-	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
 			jd->dir,
 			pid,
 			count);
@@ -530,7 +530,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
 		return -1;
 
 	filename = event->mmap2.filename;
-	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
 	         jd->dir,
 	         pid,
 		 jr->move.code_index);
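The jitdump code_index field is 64 bits wide, so the widened local and the matching "%" PRIu64 ".so" format above keep the "code load" and "code move" paths agreeing on the DSO filename. A small sketch of that naming convention (the helper and the example values are invented):

#include <inttypes.h>
#include <stdio.h>

static void jit_dso_name(char *buf, size_t len, const char *dir,
			 int pid, uint64_t code_index)
{
	snprintf(buf, len, "%s/jitted-%d-%" PRIu64 ".so", dir, pid, code_index);
}

int main(void)
{
	char load[256], move[256];

	jit_dso_name(load, sizeof(load), "/tmp/jit", 1234, 42);
	jit_dso_name(move, sizeof(move), "/tmp/jit", 1234, 42);
	printf("%s\n%s\n", load, move);	/* identical by construction */
	return 0;
}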
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 19262f9..2344d86 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -230,14 +230,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
 	const char *prefix_dir = "";
 	const char *suffix_dir = "";
 
+	/* _UTSNAME_LENGTH is 65 */
+	char release[128];
+
 	char *autoconf_path;
 
 	int err;
 
 	if (!test_dir) {
-		/* _UTSNAME_LENGTH is 65 */
-		char release[128];
-
 		err = fetch_kernel_version(NULL, release,
 					   sizeof(release));
 		if (err)
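Hoisting release[] to function scope above is a lifetime fix: an array declared inside the if-block ceases to exist at its closing brace, so any later use of its contents is undefined. A reduced illustration (not the perf code; names and values invented):

#include <stdio.h>
#include <string.h>

static const char *broken(void)
{
	const char *kver = NULL;

	if (1) {
		char release[128];	/* storage ends at the closing brace */

		strcpy(release, "5.3.0");
		kver = release;		/* dangling once the block exits */
	}

	return kver;			/* must not be dereferenced */
}

static const char *fixed(char *release, size_t len)
{
	snprintf(release, len, "%s", "5.3.0");	/* caller-owned storage */
	return release;
}

int main(void)
{
	char release[128];

	(void)broken();
	printf("%s\n", fixed(release, sizeof(release)));
	return 0;
}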
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index a0061e0..6917ba8 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -154,6 +154,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
 	evsel->prev_raw_counts = NULL;
 }
 
+static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
+{
+	if (evsel->prev_raw_counts) {
+		evsel->prev_raw_counts->aggr.val = 0;
+		evsel->prev_raw_counts->aggr.ena = 0;
+		evsel->prev_raw_counts->aggr.run = 0;
+	}
+}
+
 static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
 {
 	int ncpus = perf_evsel__nr_cpus(evsel);
@@ -204,6 +213,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
 	}
 }
 
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel)
+		perf_evsel__reset_prev_raw_counts(evsel);
+}
+
 static void zero_per_pkg(struct perf_evsel *counter)
 {
 	if (counter->per_pkg_mask)
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 36efb98..e19abb1 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -158,6 +158,7 @@ void perf_stat__collect_metric_expr(struct perf_evlist *);
 int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
 void perf_evlist__free_stats(struct perf_evlist *evlist);
 void perf_evlist__reset_stats(struct perf_evlist *evlist);
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
 
 int perf_stat_process_counter(struct perf_stat_config *config,
 			      struct perf_evsel *counter);
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 33752e0..3de57cc 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -12,6 +12,7 @@
  */
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
+#include <linux/acpi.h>
 #include <linux/list.h>
 #include <linux/uuid.h>
 #include <linux/ioport.h>
@@ -234,9 +235,6 @@ struct nd_intel_lss {
 	__u32 status;
 } __packed;
 
-union acpi_object;
-typedef void *acpi_handle;
-
 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
 		 const guid_t *guid, u64 rev, u64 func,